core.c

/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);

static void dev_pm_opp_get(struct dev_pm_opp *opp);

static struct opp_device *_find_opp_dev(const struct device *dev,
					struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev)
			return opp_dev;

	return NULL;
}

static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
	struct opp_table *opp_table;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (_find_opp_dev(dev, opp_table)) {
			_get_opp_table_kref(opp_table);

			return opp_table;
		}
	}

	return ERR_PTR(-ENODEV);
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev: device pointer used to lookup OPP table
 *
 * Search OPP table for one containing matching device.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&opp_table_lock);
	opp_table = _find_opp_table_unlocked(dev);
	mutex_unlock(&opp_table_lock);

	return opp_table;
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp: opp for which the voltage has to be returned
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with a single power supply.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->supplies[0].u_volt;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp: opp for which the frequency has to be returned
 *
 * Return: frequency in hertz corresponding to the opp, else
 * return 0
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->rate;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
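
/*
 * Illustrative sketch (not part of the original file): a hypothetical consumer
 * that already holds a reference to an OPP (e.g. from one of the find helpers
 * below) could read back its frequency/voltage pair like this:
 *
 *	static void example_print_opp(struct device *dev, struct dev_pm_opp *opp)
 *	{
 *		unsigned long freq = dev_pm_opp_get_freq(opp);
 *		unsigned long volt = dev_pm_opp_get_voltage(opp);
 *
 *		dev_info(dev, "OPP: %lu Hz @ %lu uV\n", freq, volt);
 *	}
 */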

/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations of time to finish high-throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long clock_latency_ns;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	clock_latency_ns = opp_table->clock_latency_ns_max;

	dev_pm_opp_put_opp_table(opp_table);

	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	count = opp_table->regulator_count;

	/* Regulator may not be required for the device */
	if (!count)
		goto put_opp_table;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto put_opp_table;

	mutex_lock(&opp_table->lock);

	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;

		list_for_each_entry(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
	}

	mutex_unlock(&opp_table->lock);

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

	kfree(uV);

put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to other.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
		dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
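
/*
 * Illustrative sketch (not part of the original file): a hypothetical cpufreq
 * style driver could use this helper to populate its transition latency when
 * initializing a policy:
 *
 *	struct device *cpu_dev = get_cpu_device(policy->cpu);
 *
 *	policy->cpuinfo.transition_latency =
 *		dev_pm_opp_get_max_transition_latency(cpu_dev);
 */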

/**
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
 * @dev: device for which we do this operation
 *
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0.
 */
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long freq = 0;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	if (opp_table->suspend_opp && opp_table->suspend_opp->available)
		freq = dev_pm_opp_get_freq(opp_table->suspend_opp);

	dev_pm_opp_put_opp_table(opp_table);

	return freq;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);

int _get_opp_count(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int count = 0;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->available)
			count++;
	}

	mutex_unlock(&opp_table->lock);

	return count;
}

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev: device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * else returns 0 if none or the corresponding error value.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table;
	int count;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_dbg(dev, "%s: OPP table not found (%d)\n",
			__func__, count);
		return 0;
	}

	count = _get_opp_count(opp_table);

	dev_pm_opp_put_opp_table(opp_table);

	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev: device for which we do this operation
 * @freq: frequency to search for
 * @available: true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available=true, then the
 * match is for an exact matching frequency which is available in the stored
 * OPP table. If false, the match is for an exact frequency which is not
 * available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
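
/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * that wants the exact 800 MHz OPP, provided it is currently available:
 *
 *	static int example_check_800mhz(struct device *dev)
 *	{
 *		struct dev_pm_opp *opp;
 *
 *		opp = dev_pm_opp_find_freq_exact(dev, 800000000, true);
 *		if (IS_ERR(opp))
 *			return PTR_ERR(opp);
 *
 *		dev_info(dev, "800 MHz OPP at %lu uV\n",
 *			 dev_pm_opp_get_voltage(opp));
 *		dev_pm_opp_put(opp);
 *		return 0;
 *	}
 */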

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	return opp;
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	opp = _find_freq_ceil(opp_table, freq);

	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
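
/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * looking up the lowest available OPP at or above a target rate. Note that
 * *freq is updated in place to the frequency of the matched OPP:
 *
 *	static long example_round_up_freq(struct device *dev, unsigned long target)
 *	{
 *		unsigned long freq = target;
 *		struct dev_pm_opp *opp;
 *
 *		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *		if (IS_ERR(opp))
 *			return PTR_ERR(opp);
 *
 *		dev_pm_opp_put(opp);
 *		return freq;
 *	}
 */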

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp))
		dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    struct dev_pm_opp_supply *supply)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);

	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);

	return ret;
}

static inline int
_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
			  unsigned long old_freq, unsigned long freq)
{
	int ret;

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
	}

	return ret;
}

static inline int
_generic_set_opp_domain(struct device *dev, struct clk *clk,
			unsigned long old_freq, unsigned long freq,
			unsigned int old_pstate, unsigned int new_pstate)
{
	int ret;

	/* Scaling up? Scale domain performance state before frequency */
	if (freq > old_freq) {
		ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
		if (ret)
			return ret;
	}

	ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
	if (ret)
		goto restore_domain_state;

	/* Scaling down? Scale domain performance state after frequency */
	if (freq < old_freq) {
		ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (_generic_set_opp_clk_only(dev, clk, freq, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_domain_state:
	if (freq > old_freq)
		dev_pm_genpd_set_performance_state(dev, old_pstate);

	return ret;
}

static int _generic_set_opp_regulator(const struct opp_table *opp_table,
				      struct device *dev,
				      unsigned long old_freq,
				      unsigned long freq,
				      struct dev_pm_opp_supply *old_supply,
				      struct dev_pm_opp_supply *new_supply)
{
	struct regulator *reg = opp_table->regulators[0];
	int ret;

	/* This function only supports single regulator per device */
	if (WARN_ON(opp_table->regulator_count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	/* Scaling up? Scale voltage before frequency */
	if (freq > old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */
	ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
	if (ret)
		goto restore_voltage;

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (old_supply)
		_set_opp_voltage(dev, reg, old_supply);

	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq, old_freq;
	struct dev_pm_opp *old_opp, *opp;
	struct clk *clk;
	int ret, size;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	clk = opp_table->clk;
	if (IS_ERR(clk)) {
		dev_err(dev, "%s: No clock available for the device\n",
			__func__);
		ret = PTR_ERR(clk);
		goto put_opp_table;
	}

	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		ret = 0;
		goto put_opp_table;
	}

	old_opp = _find_freq_ceil(opp_table, &old_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = _find_freq_ceil(opp_table, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		goto put_old_opp;
	}

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
		old_freq, freq);

	/* Only frequency scaling */
	if (!opp_table->regulators) {
		/*
		 * We don't support devices with both regulator and
		 * domain performance-state for now.
		 */
		if (opp_table->genpd_performance_state)
			ret = _generic_set_opp_domain(dev, clk, old_freq, freq,
						      IS_ERR(old_opp) ? 0 : old_opp->pstate,
						      opp->pstate);
		else
			ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
	} else if (!opp_table->set_opp) {
		ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
						 IS_ERR(old_opp) ? NULL : old_opp->supplies,
						 opp->supplies);
	} else {
		struct dev_pm_set_opp_data *data;

		data = opp_table->set_opp_data;
		data->regulators = opp_table->regulators;
		data->regulator_count = opp_table->regulator_count;
		data->clk = clk;
		data->dev = dev;

		data->old_opp.rate = old_freq;
		size = sizeof(*opp->supplies) * opp_table->regulator_count;
		if (IS_ERR(old_opp))
			memset(data->old_opp.supplies, 0, size);
		else
			memcpy(data->old_opp.supplies, old_opp->supplies, size);

		data->new_opp.rate = freq;
		memcpy(data->new_opp.supplies, opp->supplies, size);

		ret = opp_table->set_opp(data);
	}

	dev_pm_opp_put(opp);
put_old_opp:
	if (!IS_ERR(old_opp))
		dev_pm_opp_put(old_opp);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
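
/*
 * Illustrative sketch (not part of the original file): a hypothetical devfreq
 * style consumer usually resolves its target to a real OPP first (for
 * book-keeping) and then lets the OPP core program the clock and supplies:
 *
 *	static int example_scale(struct device *dev, unsigned long target_hz)
 *	{
 *		unsigned long freq = target_hz;
 *		struct dev_pm_opp *opp;
 *
 *		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *		if (IS_ERR(opp))
 *			return PTR_ERR(opp);
 *		dev_pm_opp_put(opp);
 *
 *		return dev_pm_opp_set_rate(dev, freq);
 *	}
 */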

/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;
	int ret;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

	/* Initialize opp-dev */
	opp_dev->dev = dev;
	list_add(&opp_dev->node, &opp_table->dev_list);

	/* Create debugfs entries for the opp_table */
	ret = opp_debug_register(opp_dev, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
			__func__, ret);

	return opp_dev;
}

static struct opp_table *_allocate_opp_table(struct device *dev)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return NULL;

	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		kfree(opp_table);
		return NULL;
	}

	_of_init_opp_table(opp_table, dev);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	mutex_init(&opp_table->lock);
	kref_init(&opp_table->kref);

	/* Secure the device table modification */
	list_add(&opp_table->node, &opp_tables);
	return opp_table;
}

void _get_opp_table_kref(struct opp_table *opp_table)
{
	kref_get(&opp_table->kref);
}

struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table_unlocked(dev);
	if (!IS_ERR(opp_table))
		goto unlock;

	opp_table = _allocate_opp_table(dev);

unlock:
	mutex_unlock(&opp_table_lock);

	return opp_table;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);

static void _opp_table_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
	struct opp_device *opp_dev;

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
				   node);

	_remove_opp_dev(opp_dev, opp_table);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&opp_table->dev_list));

	mutex_destroy(&opp_table->lock);
	list_del(&opp_table->node);
	kfree(opp_table);

	mutex_unlock(&opp_table_lock);
}

void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);

void _opp_free(struct dev_pm_opp *opp)
{
	kfree(opp);
}

static void _opp_kref_release(struct kref *kref)
{
	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
	struct opp_table *opp_table = opp->opp_table;

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
	opp_debug_remove_one(opp);
	list_del(&opp->node);
	kfree(opp);

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);
}

static void dev_pm_opp_get(struct dev_pm_opp *opp)
{
	kref_get(&opp->kref);
}

void dev_pm_opp_put(struct dev_pm_opp *opp)
{
	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);

/**
 * dev_pm_opp_remove() - Remove an OPP from OPP table
 * @dev: device for which we do this operation
 * @freq: OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp table.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct opp_table *opp_table;
	bool found = false;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	if (found) {
		dev_pm_opp_put(opp);
	} else {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
	}

	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

struct dev_pm_opp *_opp_allocate(struct opp_table *table)
{
	struct dev_pm_opp *opp;
	int count, supply_size;

	/* Allocate space for at least one supply */
	count = table->regulator_count ? table->regulator_count : 1;
	supply_size = sizeof(*opp->supplies) * count;

	/* allocate new OPP node and supplies structures */
	opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
	if (!opp)
		return NULL;

	/* Put the supplies at the end of the OPP structure as an empty array */
	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
	INIT_LIST_HEAD(&opp->node);

	return opp;
}

static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
					 struct opp_table *opp_table)
{
	struct regulator *reg;
	int i;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];

		if (!regulator_is_supported_voltage(reg,
					opp->supplies[i].u_volt_min,
					opp->supplies[i].u_volt_max)) {
			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
				__func__, opp->supplies[i].u_volt_min,
				opp->supplies[i].u_volt_max);
			return false;
		}
	}

	return true;
}

static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
			     struct opp_table *opp_table,
			     struct list_head **head)
{
	struct dev_pm_opp *opp;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			*head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			return 0;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->supplies[0].u_volt,
			 opp->available, new_opp->rate,
			 new_opp->supplies[0].u_volt, new_opp->available);

		/* Should we compare voltages for all regulators here ? */
		return opp->available &&
		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
	}

	return 0;
}

/*
 * Returns:
 * 0: On success. And appropriate error message for duplicate OPPs.
 * -EBUSY: For OPP with same freq/volt and is available. The callers of
 *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
 *  sure we don't print error messages unnecessarily if different parts of
 *  kernel try to initialize the OPP table.
 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
 *  should be considered an error by the callers of _opp_add().
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table, bool rate_not_available)
{
	struct list_head *head;
	int ret;

	mutex_lock(&opp_table->lock);

	head = &opp_table->opp_list;

	if (likely(!rate_not_available)) {
		ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
		if (ret) {
			mutex_unlock(&opp_table->lock);
			return ret;
		}
	}

	if (opp_table->get_pstate)
		new_opp->pstate = opp_table->get_pstate(dev, new_opp->rate);

	list_add(&new_opp->node, head);
	mutex_unlock(&opp_table->lock);

	new_opp->opp_table = opp_table;
	kref_init(&new_opp->kref);

	/* Get a reference to the OPP table */
	_get_opp_table_kref(opp_table);

	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}

/**
 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
 * @opp_table: OPP table
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 * @dynamic: Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
		unsigned long freq, long u_volt, bool dynamic)
{
	struct dev_pm_opp *new_opp;
	unsigned long tol;
	int ret;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return -ENOMEM;

	/* populate the opp table */
	new_opp->rate = freq;
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table, false);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_opp;
	}

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);

	return 0;

free_opp:
	_opp_free(new_opp);

	return ret;
}

/**
 * dev_pm_opp_set_supported_hw() - Set supported platforms
 * @dev: Device for which supported-hw has to be set.
 * @versions: Array of hierarchy of versions to match.
 * @count: Number of elements in the array.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the hierarchy of versions it supports. OPP layer will then enable
 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
 * property.
 */
struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
			const u32 *versions, unsigned int count)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Do we already have a version hierarchy associated with opp_table? */
	if (opp_table->supported_hw) {
		dev_err(dev, "%s: Already have supported hardware list\n",
			__func__);
		ret = -EBUSY;
		goto err;
	}

	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
					  GFP_KERNEL);
	if (!opp_table->supported_hw) {
		ret = -ENOMEM;
		goto err;
	}

	opp_table->supported_hw_count = count;

	return opp_table;

err:
	dev_pm_opp_put_opp_table(opp_table);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
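
/*
 * Illustrative sketch (not part of the original file): a hypothetical platform
 * driver would typically register its hardware version hierarchy during probe,
 * before the OPP table is populated from DT, and drop it again on removal:
 *
 *	static const u32 example_hw_versions[] = { 0x2, 0x1 };
 *	struct opp_table *table;
 *
 *	table = dev_pm_opp_set_supported_hw(dev, example_hw_versions,
 *					    ARRAY_SIZE(example_hw_versions));
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *
 *	// ... add/use OPPs, then on teardown:
 *	dev_pm_opp_put_supported_hw(table);
 */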

/**
 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
 * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
 *
 * This is required only for the V2 bindings, and is called for a matching
 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
 * will not be freed.
 */
void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
{
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	if (!opp_table->supported_hw) {
		pr_err("%s: Doesn't have supported hardware list\n",
		       __func__);
		return;
	}

	kfree(opp_table->supported_hw);
	opp_table->supported_hw = NULL;
	opp_table->supported_hw_count = 0;

	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);

/**
 * dev_pm_opp_set_prop_name() - Set prop-extn name
 * @dev: Device for which the prop-name has to be set.
 * @name: name to postfix to properties.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the extn to be used for certain property names. The properties to
 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
 * should postfix the property name with -<name> while looking for them.
 */
struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Do we already have a prop-name associated with opp_table? */
	if (opp_table->prop_name) {
		dev_err(dev, "%s: Already have prop-name %s\n", __func__,
			opp_table->prop_name);
		ret = -EBUSY;
		goto err;
	}

	opp_table->prop_name = kstrdup(name, GFP_KERNEL);
	if (!opp_table->prop_name) {
		ret = -ENOMEM;
		goto err;
	}

	return opp_table;

err:
	dev_pm_opp_put_opp_table(opp_table);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);

/**
 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
 * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
 *
 * This is required only for the V2 bindings, and is called for a matching
 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
 * will not be freed.
 */
void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
{
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	if (!opp_table->prop_name) {
		pr_err("%s: Doesn't have a prop-name\n", __func__);
		return;
	}

	kfree(opp_table->prop_name);
	opp_table->prop_name = NULL;

	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);

static int _allocate_set_opp_data(struct opp_table *opp_table)
{
	struct dev_pm_set_opp_data *data;
	int len, count = opp_table->regulator_count;

	if (WARN_ON(!count))
		return -EINVAL;

	/* space for set_opp_data */
	len = sizeof(*data);

	/* space for old_opp.supplies and new_opp.supplies */
	len += 2 * sizeof(struct dev_pm_opp_supply) * count;

	data = kzalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->old_opp.supplies = (void *)(data + 1);
	data->new_opp.supplies = data->old_opp.supplies + count;

	opp_table->set_opp_data = data;

	return 0;
}

static void _free_set_opp_data(struct opp_table *opp_table)
{
	kfree(opp_table->set_opp_data);
	opp_table->set_opp_data = NULL;
}

/**
 * dev_pm_opp_set_regulators() - Set regulator names for the device
 * @dev: Device for which regulator name is being set.
 * @names: Array of pointers to the names of the regulator.
 * @count: Number of regulators.
 *
 * In order to support OPP switching, OPP layer needs to know the name of the
 * device's regulators, as the core would be required to switch voltages as
 * well.
 *
 * This must be called before any OPPs are initialized for the device.
 */
struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
					    const char * const names[],
					    unsigned int count)
{
	struct opp_table *opp_table;
	struct regulator *reg;
	int ret, i;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Already have regulators set */
	if (opp_table->regulators) {
		ret = -EBUSY;
		goto err;
	}

	opp_table->regulators = kmalloc_array(count,
					      sizeof(*opp_table->regulators),
					      GFP_KERNEL);
	if (!opp_table->regulators) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < count; i++) {
		reg = regulator_get_optional(dev, names[i]);
		if (IS_ERR(reg)) {
			ret = PTR_ERR(reg);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "%s: no regulator (%s) found: %d\n",
					__func__, names[i], ret);
			goto free_regulators;
		}

		opp_table->regulators[i] = reg;
	}

	opp_table->regulator_count = count;

	/* Allocate block only once to pass to set_opp() routines */
	ret = _allocate_set_opp_data(opp_table);
	if (ret)
		goto free_regulators;

	return opp_table;

free_regulators:
	while (i != 0)
		regulator_put(opp_table->regulators[--i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = 0;

err:
	dev_pm_opp_put_opp_table(opp_table);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
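
/*
 * Illustrative sketch (not part of the original file): a hypothetical platform
 * with one supply named "vdd-core" would register it before populating OPPs:
 *
 *	static const char * const example_reg_names[] = { "vdd-core" };
 *	struct opp_table *table;
 *
 *	table = dev_pm_opp_set_regulators(dev, example_reg_names,
 *					  ARRAY_SIZE(example_reg_names));
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *
 *	// ... add/use OPPs, then on teardown:
 *	dev_pm_opp_put_regulators(table);
 */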

/**
 * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
 * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
 */
void dev_pm_opp_put_regulators(struct opp_table *opp_table)
{
	int i;

	if (!opp_table->regulators) {
		pr_err("%s: Doesn't have regulators set\n", __func__);
		return;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	for (i = opp_table->regulator_count - 1; i >= 0; i--)
		regulator_put(opp_table->regulators[i]);

	_free_set_opp_data(opp_table);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = 0;

	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);

/**
 * dev_pm_opp_set_clkname() - Set clk name for the device
 * @dev: Device for which clk name is being set.
 * @name: Clk name.
 *
 * In order to support OPP switching, OPP layer needs to get pointer to the
 * clock for the device. Simple cases work fine without using this routine (i.e.
 * by passing connection-id as NULL), but for a device with multiple clocks
 * available, the OPP core needs to know the exact name of the clk to use.
 *
 * This must be called before any OPPs are initialized for the device.
 */
struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Already have default clk set, free it */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, name);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		if (ret != -EPROBE_DEFER) {
			dev_err(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
		}
		goto err;
	}

	return opp_table;

err:
	dev_pm_opp_put_opp_table(opp_table);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
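
/*
 * Illustrative sketch (not part of the original file): a hypothetical device
 * with several clocks could tell the OPP core which one to scale, e.g. the
 * clock with connection-id "core":
 *
 *	struct opp_table *table = dev_pm_opp_set_clkname(dev, "core");
 *
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *
 *	// ... add/use OPPs, then on teardown:
 *	dev_pm_opp_put_clkname(table);
 */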

/**
 * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
 * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
 */
void dev_pm_opp_put_clkname(struct opp_table *opp_table)
{
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	clk_put(opp_table->clk);
	opp_table->clk = ERR_PTR(-EINVAL);

	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);

/**
 * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
 * @dev: Device for which the helper is getting registered.
 * @set_opp: Custom set OPP helper.
 *
 * This is useful to support complex platforms (like platforms with multiple
 * regulators per device), instead of the generic OPP set rate helper.
 *
 * This must be called before any OPPs are initialized for the device.
 */
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
			int (*set_opp)(struct dev_pm_set_opp_data *data))
{
	struct opp_table *opp_table;
	int ret;

	if (!set_opp)
		return ERR_PTR(-EINVAL);

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Already have custom set_opp helper */
	if (WARN_ON(opp_table->set_opp)) {
		ret = -EBUSY;
		goto err;
	}

	opp_table->set_opp = set_opp;

	return opp_table;

err:
	dev_pm_opp_put_opp_table(opp_table);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
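
/*
 * Illustrative sketch (not part of the original file): the rough shape of a
 * hypothetical custom set_opp() helper. Before every transition the OPP core
 * fills @data with the old/new rate and supplies, the regulators, the clk and
 * the device, as seen in dev_pm_opp_set_rate() above:
 *
 *	static int example_set_opp(struct dev_pm_set_opp_data *data)
 *	{
 *		// Program data->regulators[0..regulator_count - 1] from
 *		// data->new_opp.supplies[], then switch data->clk to
 *		// data->new_opp.rate, undoing the supply change on failure.
 *		return 0;
 *	}
 *
 *	table = dev_pm_opp_register_set_opp_helper(dev, example_set_opp);
 */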
  1280. /**
  1281. * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
  1282. * set_opp helper
  1283. * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
  1284. *
  1285. * Release resources blocked for platform specific set_opp helper.
  1286. */
  1287. void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
  1288. {
  1289. if (!opp_table->set_opp) {
  1290. pr_err("%s: Doesn't have custom set_opp helper set\n",
  1291. __func__);
  1292. return;
  1293. }
  1294. /* Make sure there are no concurrent readers while updating opp_table */
  1295. WARN_ON(!list_empty(&opp_table->opp_list));
  1296. opp_table->set_opp = NULL;
  1297. dev_pm_opp_put_opp_table(opp_table);
  1298. }
  1299. EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
  1300. /**
  1301. * dev_pm_opp_register_get_pstate_helper() - Register get_pstate() helper.
  1302. * @dev: Device for which the helper is getting registered.
  1303. * @get_pstate: Helper.
  1304. *
  1305. * TODO: Remove this callback after the same information is available via Device
  1306. * Tree.
  1307. *
  1308. * This allows a platform to initialize the performance states of individual
  1309. * OPPs for its devices, until we get similar information directly from DT.
  1310. *
  1311. * This must be called before the OPPs are initialized for the device.
  1312. */
  1313. struct opp_table *dev_pm_opp_register_get_pstate_helper(struct device *dev,
  1314. int (*get_pstate)(struct device *dev, unsigned long rate))
  1315. {
  1316. struct opp_table *opp_table;
  1317. int ret;
  1318. if (!get_pstate)
  1319. return ERR_PTR(-EINVAL);
  1320. opp_table = dev_pm_opp_get_opp_table(dev);
  1321. if (!opp_table)
  1322. return ERR_PTR(-ENOMEM);
  1323. /* This should be called before OPPs are initialized */
  1324. if (WARN_ON(!list_empty(&opp_table->opp_list))) {
  1325. ret = -EBUSY;
  1326. goto err;
  1327. }
  1328. /* Already have genpd_performance_state set */
  1329. if (WARN_ON(opp_table->genpd_performance_state)) {
  1330. ret = -EBUSY;
  1331. goto err;
  1332. }
  1333. opp_table->genpd_performance_state = true;
  1334. opp_table->get_pstate = get_pstate;
  1335. return opp_table;
  1336. err:
  1337. dev_pm_opp_put_opp_table(opp_table);
  1338. return ERR_PTR(ret);
  1339. }
  1340. EXPORT_SYMBOL_GPL(dev_pm_opp_register_get_pstate_helper);
/**
 * dev_pm_opp_unregister_get_pstate_helper() - Releases resources blocked for
 *					       get_pstate() helper
 * @opp_table: OPP table returned from dev_pm_opp_register_get_pstate_helper().
 *
 * Release resources blocked for platform specific get_pstate() helper.
 */
void dev_pm_opp_unregister_get_pstate_helper(struct opp_table *opp_table)
{
	if (!opp_table->genpd_performance_state) {
		pr_err("%s: Doesn't have performance states set\n",
		       __func__);
		return;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	opp_table->genpd_performance_state = false;
	opp_table->get_pstate = NULL;

	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_get_pstate_helper);

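/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * platform providing performance states for its OPPs until DT can describe
 * them. foo_get_pstate(), foo_probe() and the rate-to-state mapping are
 * assumptions made for the example; the callback signature matches the one
 * accepted by dev_pm_opp_register_get_pstate_helper() above.
 *
 *	static int foo_get_pstate(struct device *dev, unsigned long rate)
 *	{
 *		// Map each OPP frequency to a genpd performance state.
 *		return rate > 800000000 ? 2 : 1;
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct opp_table *opp_table;
 *
 *		opp_table = dev_pm_opp_register_get_pstate_helper(&pdev->dev,
 *								  foo_get_pstate);
 *		if (IS_ERR(opp_table))
 *			return PTR_ERR(opp_table);
 *
 *		// OPPs must only be added after this point.
 *		return 0;
 *	}
 */
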
/**
 * dev_pm_opp_add() - Add an OPP to the device's OPP table
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 *
 * This function adds an OPP definition to the OPP table and returns status.
 * The OPP is made available by default and can be controlled using the
 * dev_pm_opp_enable/disable functions.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return -ENOMEM;

	ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);

	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);

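/*
 * Example (illustrative sketch only, not part of this file): dynamically
 * registering a small OPP table from a hypothetical driver. foo_init_opps()
 * and the frequency/voltage pairs are made up for illustration.
 *
 *	static int foo_init_opps(struct device *dev)
 *	{
 *		int ret;
 *
 *		// 500 MHz at 0.9 V
 *		ret = dev_pm_opp_add(dev, 500000000, 900000);
 *		if (ret)
 *			return ret;
 *
 *		// 1 GHz at 1.1 V
 *		return dev_pm_opp_add(dev, 1000000000, 1100000);
 *	}
 */
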
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP; opp_{enable,disable} share a common logic
 * which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	mutex_lock(&opp_table->lock);

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}

	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;

	opp->available = availability_req;

	dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);

	/* Notify the change of the OPP availability */
	if (availability_req)
		blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
					     opp);
	else
		blocking_notifier_call_chain(&opp_table->head,
					     OPP_EVENT_DISABLE, opp);

	dev_pm_opp_put(opp);
	goto put_table;

unlock:
	mutex_unlock(&opp_table->lock);
put_table:
	dev_pm_opp_put_opp_table(opp_table);
	return r;
}

/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it has been temporarily made unavailable with
 * dev_pm_opp_disable.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);

/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant as a temporary control
 * for users to make this OPP unavailable until the circumstances are right
 * to make it available again (with a call to dev_pm_opp_enable).
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);

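/*
 * Example (illustrative sketch only, not part of this file): temporarily
 * masking a high OPP, e.g. on a hypothetical thermal event, and restoring it
 * later. The 1 GHz frequency is assumed to already exist in the device's OPP
 * table.
 *
 *	static void foo_thermal_throttle(struct device *dev, bool hot)
 *	{
 *		if (hot)
 *			dev_pm_opp_disable(dev, 1000000000);
 *		else
 *			dev_pm_opp_enable(dev, 1000000000);
 *	}
 */
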
/**
 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
 * @dev: Device for which notifier needs to be registered
 * @nb: Notifier block to be registered
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = blocking_notifier_chain_register(&opp_table->head, nb);

	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL(dev_pm_opp_register_notifier);

/**
 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
 * @dev: Device for which notifier needs to be unregistered
 * @nb: Notifier block to be unregistered
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_unregister_notifier(struct device *dev,
				   struct notifier_block *nb)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = blocking_notifier_chain_unregister(&opp_table->head, nb);

	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);

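/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * listener reacting to OPP availability changes. foo_opp_notify() and
 * foo_opp_nb are assumptions made for the example; the callback follows the
 * standard notifier_block signature, and OPP_EVENT_ENABLE/OPP_EVENT_DISABLE
 * are the events raised by _opp_set_availability() above, with the affected
 * OPP passed as the data pointer.
 *
 *	static int foo_opp_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		if (event == OPP_EVENT_ENABLE || event == OPP_EVENT_DISABLE)
 *			pr_debug("OPP availability changed\n");
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_opp_nb = {
 *		.notifier_call = foo_opp_notify,
 *	};
 *
 *	// In the driver: dev_pm_opp_register_notifier(dev, &foo_opp_nb);
 *	// On teardown:   dev_pm_opp_unregister_notifier(dev, &foo_opp_nb);
 */
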
/*
 * Free OPPs created using static entries present in DT and, based on the
 * remove_all parameter, the dynamically added entries as well.
 */
void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
			      bool remove_all)
{
	struct dev_pm_opp *opp, *tmp;

	/* Find if opp_table manages a single device */
	if (list_is_singular(&opp_table->dev_list)) {
		/* Free static OPPs */
		list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
			if (remove_all || !opp->dynamic)
				dev_pm_opp_put(opp);
		}

		/*
		 * The OPP table is getting removed, drop the performance state
		 * constraints.
		 */
		if (opp_table->genpd_performance_state)
			dev_pm_genpd_set_performance_state(dev, 0);
	} else {
		_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
	}
}

void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
{
	struct opp_table *opp_table;

	/* Check for existing table for 'dev' */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);

		if (error != -ENODEV)
			WARN(1, "%s: opp_table: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		return;
	}

	_dev_pm_opp_remove_table(opp_table, dev, remove_all);

	dev_pm_opp_put_opp_table(opp_table);
}

/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev: device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	_dev_pm_opp_find_and_remove_table(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
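
/*
 * Example (illustrative sketch only, not part of this file): dropping all
 * OPPs on driver removal for a hypothetical driver that added them
 * dynamically in its probe path. foo_remove() is an assumption made for the
 * example.
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		dev_pm_opp_remove_table(&pdev->dev);
 *		return 0;
 *	}
 */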