core.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818
  1. /*
  2. * Generic OPP Interface
  3. *
  4. * Copyright (C) 2009-2010 Texas Instruments Incorporated.
  5. * Nishanth Menon
  6. * Romit Dasgupta
  7. * Kevin Hilman
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/clk.h>
  15. #include <linux/errno.h>
  16. #include <linux/err.h>
  17. #include <linux/slab.h>
  18. #include <linux/device.h>
  19. #include <linux/export.h>
  20. #include <linux/pm_domain.h>
  21. #include <linux/regulator/consumer.h>
  22. #include "opp.h"
/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
  31. static struct opp_device *_find_opp_dev(const struct device *dev,
  32. struct opp_table *opp_table)
  33. {
  34. struct opp_device *opp_dev;
  35. list_for_each_entry(opp_dev, &opp_table->dev_list, node)
  36. if (opp_dev->dev == dev)
  37. return opp_dev;
  38. return NULL;
  39. }
  40. static struct opp_table *_find_opp_table_unlocked(struct device *dev)
  41. {
  42. struct opp_table *opp_table;
  43. bool found;
  44. list_for_each_entry(opp_table, &opp_tables, node) {
  45. mutex_lock(&opp_table->lock);
  46. found = !!_find_opp_dev(dev, opp_table);
  47. mutex_unlock(&opp_table->lock);
  48. if (found) {
  49. _get_opp_table_kref(opp_table);
  50. return opp_table;
  51. }
  52. }
  53. return ERR_PTR(-ENODEV);
  54. }
  55. /**
  56. * _find_opp_table() - find opp_table struct using device pointer
  57. * @dev: device pointer used to lookup OPP table
  58. *
  59. * Search OPP table for one containing matching device.
  60. *
  61. * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
  62. * -EINVAL based on type of error.
  63. *
  64. * The callers must call dev_pm_opp_put_opp_table() after the table is used.
  65. */
  66. struct opp_table *_find_opp_table(struct device *dev)
  67. {
  68. struct opp_table *opp_table;
  69. if (IS_ERR_OR_NULL(dev)) {
  70. pr_err("%s: Invalid parameters\n", __func__);
  71. return ERR_PTR(-EINVAL);
  72. }
  73. mutex_lock(&opp_table_lock);
  74. opp_table = _find_opp_table_unlocked(dev);
  75. mutex_unlock(&opp_table_lock);
  76. return opp_table;
  77. }
  78. /**
  79. * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
  80. * @opp: opp for which voltage has to be returned for
  81. *
  82. * Return: voltage in micro volt corresponding to the opp, else
  83. * return 0
  84. *
  85. * This is useful only for devices with single power supply.
  86. */
  87. unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
  88. {
  89. if (IS_ERR_OR_NULL(opp)) {
  90. pr_err("%s: Invalid parameters\n", __func__);
  91. return 0;
  92. }
  93. return opp->supplies[0].u_volt;
  94. }
  95. EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
  96. /**
  97. * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  98. * @opp: opp for which frequency has to be returned for
  99. *
  100. * Return: frequency in hertz corresponding to the opp, else
  101. * return 0
  102. */
  103. unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
  104. {
  105. if (IS_ERR_OR_NULL(opp) || !opp->available) {
  106. pr_err("%s: Invalid parameters\n", __func__);
  107. return 0;
  108. }
  109. return opp->rate;
  110. }
  111. EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
  112. /**
  113. * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
  114. * @opp: opp for which turbo mode is being verified
  115. *
  116. * Turbo OPPs are not for normal use, and can be enabled (under certain
  117. * conditions) for short duration of times to finish high throughput work
  118. * quickly. Running on them for longer times may overheat the chip.
  119. *
  120. * Return: true if opp is turbo opp, else false.
  121. */
  122. bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
  123. {
  124. if (IS_ERR_OR_NULL(opp) || !opp->available) {
  125. pr_err("%s: Invalid parameters\n", __func__);
  126. return false;
  127. }
  128. return opp->turbo;
  129. }
  130. EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
  131. /**
  132. * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
  133. * @dev: device for which we do this operation
  134. *
  135. * Return: This function returns the max clock latency in nanoseconds.
  136. */
  137. unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
  138. {
  139. struct opp_table *opp_table;
  140. unsigned long clock_latency_ns;
  141. opp_table = _find_opp_table(dev);
  142. if (IS_ERR(opp_table))
  143. return 0;
  144. clock_latency_ns = opp_table->clock_latency_ns_max;
  145. dev_pm_opp_put_opp_table(opp_table);
  146. return clock_latency_ns;
  147. }
  148. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
  149. /**
  150. * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
  151. * @dev: device for which we do this operation
  152. *
  153. * Return: This function returns the max voltage latency in nanoseconds.
  154. */
  155. unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
  156. {
  157. struct opp_table *opp_table;
  158. struct dev_pm_opp *opp;
  159. struct regulator *reg;
  160. unsigned long latency_ns = 0;
  161. int ret, i, count;
  162. struct {
  163. unsigned long min;
  164. unsigned long max;
  165. } *uV;
  166. opp_table = _find_opp_table(dev);
  167. if (IS_ERR(opp_table))
  168. return 0;
  169. count = opp_table->regulator_count;
  170. /* Regulator may not be required for the device */
  171. if (!count)
  172. goto put_opp_table;
  173. uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
  174. if (!uV)
  175. goto put_opp_table;
  176. mutex_lock(&opp_table->lock);
  177. for (i = 0; i < count; i++) {
  178. uV[i].min = ~0;
  179. uV[i].max = 0;
  180. list_for_each_entry(opp, &opp_table->opp_list, node) {
  181. if (!opp->available)
  182. continue;
  183. if (opp->supplies[i].u_volt_min < uV[i].min)
  184. uV[i].min = opp->supplies[i].u_volt_min;
  185. if (opp->supplies[i].u_volt_max > uV[i].max)
  186. uV[i].max = opp->supplies[i].u_volt_max;
  187. }
  188. }
  189. mutex_unlock(&opp_table->lock);
  190. /*
  191. * The caller needs to ensure that opp_table (and hence the regulator)
  192. * isn't freed, while we are executing this routine.
  193. */
  194. for (i = 0; i < count; i++) {
  195. reg = opp_table->regulators[i];
  196. ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
  197. if (ret > 0)
  198. latency_ns += ret * 1000;
  199. }
  200. kfree(uV);
  201. put_opp_table:
  202. dev_pm_opp_put_opp_table(opp_table);
  203. return latency_ns;
  204. }
  205. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
  206. /**
  207. * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
  208. * nanoseconds
  209. * @dev: device for which we do this operation
  210. *
  211. * Return: This function returns the max transition latency, in nanoseconds, to
  212. * switch from one OPP to other.
  213. */
  214. unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
  215. {
  216. return dev_pm_opp_get_max_volt_latency(dev) +
  217. dev_pm_opp_get_max_clock_latency(dev);
  218. }
  219. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
  220. /**
  221. * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
  222. * @dev: device for which we do this operation
  223. *
  224. * Return: This function returns the frequency of the OPP marked as suspend_opp
  225. * if one is available, else returns 0;
  226. */
  227. unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
  228. {
  229. struct opp_table *opp_table;
  230. unsigned long freq = 0;
  231. opp_table = _find_opp_table(dev);
  232. if (IS_ERR(opp_table))
  233. return 0;
  234. if (opp_table->suspend_opp && opp_table->suspend_opp->available)
  235. freq = dev_pm_opp_get_freq(opp_table->suspend_opp);
  236. dev_pm_opp_put_opp_table(opp_table);
  237. return freq;
  238. }
  239. EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
  240. int _get_opp_count(struct opp_table *opp_table)
  241. {
  242. struct dev_pm_opp *opp;
  243. int count = 0;
  244. mutex_lock(&opp_table->lock);
  245. list_for_each_entry(opp, &opp_table->opp_list, node) {
  246. if (opp->available)
  247. count++;
  248. }
  249. mutex_unlock(&opp_table->lock);
  250. return count;
  251. }
  252. /**
  253. * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
  254. * @dev: device for which we do this operation
  255. *
  256. * Return: This function returns the number of available opps if there are any,
  257. * else returns 0 if none or the corresponding error value.
  258. */
  259. int dev_pm_opp_get_opp_count(struct device *dev)
  260. {
  261. struct opp_table *opp_table;
  262. int count;
  263. opp_table = _find_opp_table(dev);
  264. if (IS_ERR(opp_table)) {
  265. count = PTR_ERR(opp_table);
  266. dev_dbg(dev, "%s: OPP table not found (%d)\n",
  267. __func__, count);
  268. return count;
  269. }
  270. count = _get_opp_count(opp_table);
  271. dev_pm_opp_put_opp_table(opp_table);
  272. return count;
  273. }
  274. EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
  275. /**
  276. * dev_pm_opp_find_freq_exact() - search for an exact frequency
  277. * @dev: device for which we do this operation
  278. * @freq: frequency to search for
  279. * @available: true/false - match for available opp
  280. *
  281. * Return: Searches for exact match in the opp table and returns pointer to the
  282. * matching opp if found, else returns ERR_PTR in case of error and should
  283. * be handled using IS_ERR. Error return values can be:
  284. * EINVAL: for bad pointer
  285. * ERANGE: no match found for search
  286. * ENODEV: if device not found in list of registered devices
  287. *
  288. * Note: available is a modifier for the search. if available=true, then the
  289. * match is for exact matching frequency and is available in the stored OPP
  290. * table. if false, the match is for exact frequency which is not available.
  291. *
  292. * This provides a mechanism to enable an opp which is not available currently
  293. * or the opposite as well.
  294. *
  295. * The callers are required to call dev_pm_opp_put() for the returned OPP after
  296. * use.
  297. */
  298. struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
  299. unsigned long freq,
  300. bool available)
  301. {
  302. struct opp_table *opp_table;
  303. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  304. opp_table = _find_opp_table(dev);
  305. if (IS_ERR(opp_table)) {
  306. int r = PTR_ERR(opp_table);
  307. dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
  308. return ERR_PTR(r);
  309. }
  310. mutex_lock(&opp_table->lock);
  311. list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
  312. if (temp_opp->available == available &&
  313. temp_opp->rate == freq) {
  314. opp = temp_opp;
  315. /* Increment the reference count of OPP */
  316. dev_pm_opp_get(opp);
  317. break;
  318. }
  319. }
  320. mutex_unlock(&opp_table->lock);
  321. dev_pm_opp_put_opp_table(opp_table);
  322. return opp;
  323. }
  324. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
  325. static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
  326. unsigned long *freq)
  327. {
  328. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  329. mutex_lock(&opp_table->lock);
  330. list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
  331. if (temp_opp->available && temp_opp->rate >= *freq) {
  332. opp = temp_opp;
  333. *freq = opp->rate;
  334. /* Increment the reference count of OPP */
  335. dev_pm_opp_get(opp);
  336. break;
  337. }
  338. }
  339. mutex_unlock(&opp_table->lock);
  340. return opp;
  341. }
  342. /**
  343. * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
  344. * @dev: device for which we do this operation
  345. * @freq: Start frequency
  346. *
  347. * Search for the matching ceil *available* OPP from a starting freq
  348. * for a device.
  349. *
  350. * Return: matching *opp and refreshes *freq accordingly, else returns
  351. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  352. * values can be:
  353. * EINVAL: for bad pointer
  354. * ERANGE: no match found for search
  355. * ENODEV: if device not found in list of registered devices
  356. *
  357. * The callers are required to call dev_pm_opp_put() for the returned OPP after
  358. * use.
  359. */
  360. struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
  361. unsigned long *freq)
  362. {
  363. struct opp_table *opp_table;
  364. struct dev_pm_opp *opp;
  365. if (!dev || !freq) {
  366. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  367. return ERR_PTR(-EINVAL);
  368. }
  369. opp_table = _find_opp_table(dev);
  370. if (IS_ERR(opp_table))
  371. return ERR_CAST(opp_table);
  372. opp = _find_freq_ceil(opp_table, freq);
  373. dev_pm_opp_put_opp_table(opp_table);
  374. return opp;
  375. }
  376. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
  377. /**
  378. * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
  379. * @dev: device for which we do this operation
  380. * @freq: Start frequency
  381. *
  382. * Search for the matching floor *available* OPP from a starting freq
  383. * for a device.
  384. *
  385. * Return: matching *opp and refreshes *freq accordingly, else returns
  386. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  387. * values can be:
  388. * EINVAL: for bad pointer
  389. * ERANGE: no match found for search
  390. * ENODEV: if device not found in list of registered devices
  391. *
  392. * The callers are required to call dev_pm_opp_put() for the returned OPP after
  393. * use.
  394. */
  395. struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
  396. unsigned long *freq)
  397. {
  398. struct opp_table *opp_table;
  399. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  400. if (!dev || !freq) {
  401. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  402. return ERR_PTR(-EINVAL);
  403. }
  404. opp_table = _find_opp_table(dev);
  405. if (IS_ERR(opp_table))
  406. return ERR_CAST(opp_table);
  407. mutex_lock(&opp_table->lock);
  408. list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
  409. if (temp_opp->available) {
  410. /* go to the next node, before choosing prev */
  411. if (temp_opp->rate > *freq)
  412. break;
  413. else
  414. opp = temp_opp;
  415. }
  416. }
  417. /* Increment the reference count of OPP */
  418. if (!IS_ERR(opp))
  419. dev_pm_opp_get(opp);
  420. mutex_unlock(&opp_table->lock);
  421. dev_pm_opp_put_opp_table(opp_table);
  422. if (!IS_ERR(opp))
  423. *freq = opp->rate;
  424. return opp;
  425. }
  426. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
  427. static int _set_opp_voltage(struct device *dev, struct regulator *reg,
  428. struct dev_pm_opp_supply *supply)
  429. {
  430. int ret;
  431. /* Regulator not available for device */
  432. if (IS_ERR(reg)) {
  433. dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
  434. PTR_ERR(reg));
  435. return 0;
  436. }
  437. dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
  438. supply->u_volt_min, supply->u_volt, supply->u_volt_max);
  439. ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
  440. supply->u_volt, supply->u_volt_max);
  441. if (ret)
  442. dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
  443. __func__, supply->u_volt_min, supply->u_volt,
  444. supply->u_volt_max, ret);
  445. return ret;
  446. }
  447. static inline int
  448. _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
  449. unsigned long old_freq, unsigned long freq)
  450. {
  451. int ret;
  452. ret = clk_set_rate(clk, freq);
  453. if (ret) {
  454. dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
  455. ret);
  456. }
  457. return ret;
  458. }
/*
 * Switch clock rate together with the genpd performance state.
 *
 * The performance state is raised before the frequency when scaling up and
 * lowered after it when scaling down, so the domain always meets the needs
 * of the currently programmed rate. Failed steps are rolled back in reverse
 * order via the labels below.
 */
static inline int
_generic_set_opp_domain(struct device *dev, struct clk *clk,
			unsigned long old_freq, unsigned long freq,
			unsigned int old_pstate, unsigned int new_pstate)
{
	int ret;

	/* Scaling up? Scale domain performance state before frequency */
	if (freq > old_freq) {
		ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
		if (ret)
			return ret;
	}

	ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
	if (ret)
		goto restore_domain_state;

	/* Scaling down? Scale domain performance state after frequency */
	if (freq < old_freq) {
		ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	/* Best effort: try to get back to the previous rate before failing */
	if (_generic_set_opp_clk_only(dev, clk, freq, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_domain_state:
	/* Undo the pre-bump of the performance state done while scaling up */
	if (freq > old_freq)
		dev_pm_genpd_set_performance_state(dev, old_pstate);

	return ret;
}
/*
 * Switch clock rate together with the (single) supply voltage.
 *
 * Voltage is raised before the frequency when scaling up and lowered after
 * it when scaling down, keeping the supply adequate for the active rate at
 * every step. On failure the earlier steps are undone best-effort.
 */
static int _generic_set_opp_regulator(const struct opp_table *opp_table,
				      struct device *dev,
				      unsigned long old_freq,
				      unsigned long freq,
				      struct dev_pm_opp_supply *old_supply,
				      struct dev_pm_opp_supply *new_supply)
{
	struct regulator *reg = opp_table->regulators[0];
	int ret;

	/* This function only supports single regulator per device */
	if (WARN_ON(opp_table->regulator_count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	/* Scaling up? Scale voltage before frequency */
	if (freq >= old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */
	ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
	if (ret)
		goto restore_voltage;

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	/* old_supply may be NULL when the current OPP was unknown */
	if (old_supply)
		_set_opp_voltage(dev, reg, old_supply);

	return ret;
}
/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq.
 *
 * Return: 0 on success, negative errno on failure.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq, old_freq;
	struct dev_pm_opp *old_opp, *opp;
	struct clk *clk;
	int ret, size;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	clk = opp_table->clk;
	if (IS_ERR(clk)) {
		dev_err(dev, "%s: No clock available for the device\n",
			__func__);
		ret = PTR_ERR(clk);
		goto put_opp_table;
	}

	/* Let the clock framework round the request; fall back on failure */
	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		ret = 0;
		goto put_opp_table;
	}

	/* Failure to find the current OPP is non-fatal; handled below */
	old_opp = _find_freq_ceil(opp_table, &old_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = _find_freq_ceil(opp_table, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		goto put_old_opp;
	}

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
		old_freq, freq);

	/* Only frequency scaling */
	if (!opp_table->regulators) {
		/*
		 * We don't support devices with both regulator and
		 * domain performance-state for now.
		 */
		if (opp_table->genpd_performance_state)
			ret = _generic_set_opp_domain(dev, clk, old_freq, freq,
						      IS_ERR(old_opp) ? 0 : old_opp->pstate,
						      opp->pstate);
		else
			ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
	} else if (!opp_table->set_opp) {
		/* Generic path: one regulator plus the clock */
		ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
						 IS_ERR(old_opp) ? NULL : old_opp->supplies,
						 opp->supplies);
	} else {
		/* Platform-specific set_opp() helper takes over completely */
		struct dev_pm_set_opp_data *data;

		data = opp_table->set_opp_data;
		data->regulators = opp_table->regulators;
		data->regulator_count = opp_table->regulator_count;
		data->clk = clk;
		data->dev = dev;

		data->old_opp.rate = old_freq;
		size = sizeof(*opp->supplies) * opp_table->regulator_count;
		/* Unknown current OPP: report zeroed old supplies */
		if (IS_ERR(old_opp))
			memset(data->old_opp.supplies, 0, size);
		else
			memcpy(data->old_opp.supplies, old_opp->supplies, size);

		data->new_opp.rate = freq;
		memcpy(data->new_opp.supplies, opp->supplies, size);

		ret = opp_table->set_opp(data);
	}

	dev_pm_opp_put(opp);
put_old_opp:
	if (!IS_ERR(old_opp))
		dev_pm_opp_put(old_opp);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
/* OPP-dev Helpers */

/* Unlink @opp_dev from @opp_table, drop its debugfs entries and free it. */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}
  638. static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
  639. struct opp_table *opp_table)
  640. {
  641. struct opp_device *opp_dev;
  642. int ret;
  643. opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
  644. if (!opp_dev)
  645. return NULL;
  646. /* Initialize opp-dev */
  647. opp_dev->dev = dev;
  648. list_add(&opp_dev->node, &opp_table->dev_list);
  649. /* Create debugfs entries for the opp_table */
  650. ret = opp_debug_register(opp_dev, opp_table);
  651. if (ret)
  652. dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
  653. __func__, ret);
  654. return opp_dev;
  655. }
  656. struct opp_device *_add_opp_dev(const struct device *dev,
  657. struct opp_table *opp_table)
  658. {
  659. struct opp_device *opp_dev;
  660. mutex_lock(&opp_table->lock);
  661. opp_dev = _add_opp_dev_unlocked(dev, opp_table);
  662. mutex_unlock(&opp_table->lock);
  663. return opp_dev;
  664. }
/*
 * Allocate a new OPP table for @dev, initialize it and link it into the
 * global opp_tables list. Returns NULL on allocation failure.
 *
 * NOTE(review): clk_get() failure is tolerated by design here — the error
 * pointer is kept in opp_table->clk and only logged (silently for
 * -EPROBE_DEFER); users of the clock must check IS_ERR themselves.
 */
static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return NULL;

	mutex_init(&opp_table->lock);
	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		kfree(opp_table);
		return NULL;
	}

	_of_init_opp_table(opp_table, dev, index);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	kref_init(&opp_table->kref);

	/* Secure the device table modification */
	list_add(&opp_table->node, &opp_tables);
	return opp_table;
}
/* Take an extra reference on @opp_table; dropped via dev_pm_opp_put_opp_table(). */
void _get_opp_table_kref(struct opp_table *opp_table)
{
	kref_get(&opp_table->kref);
}
/*
 * Find an existing OPP table for @dev or create one, under opp_table_lock.
 *
 * Lookup order:
 *  1. a table that already lists the device,
 *  2. a "managed" DT table shared between devices — @dev is added to it;
 *     on failure the reference taken by _managed_opp() is dropped and
 *     NULL is returned,
 *  3. a freshly allocated table.
 */
static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table_unlocked(dev);
	if (!IS_ERR(opp_table))
		goto unlock;

	opp_table = _managed_opp(dev, index);
	if (opp_table) {
		if (!_add_opp_dev_unlocked(dev, opp_table)) {
			dev_pm_opp_put_opp_table(opp_table);
			opp_table = NULL;
		}
		goto unlock;
	}

	opp_table = _allocate_opp_table(dev, index);

unlock:
	mutex_unlock(&opp_table_lock);

	return opp_table;
}
/*
 * dev_pm_opp_get_opp_table() - find or create the OPP table for a device
 * @dev: device to look up
 *
 * Returns the table for index 0, or NULL/ERR_PTR on failure. Callers are
 * expected to balance this with dev_pm_opp_put_opp_table() (see the many
 * pairings below in this file).
 */
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
	return _opp_get_opp_table(dev, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
/*
 * dev_pm_opp_get_opp_table_indexed() - like dev_pm_opp_get_opp_table(),
 * but for the OPP table at @index of the device's OPP nodes.
 */
struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev,
						   int index)
{
	return _opp_get_opp_table(dev, index);
}
/*
 * Final release of an OPP table. Invoked by kref_put_mutex() from
 * dev_pm_opp_put_opp_table() with opp_table_lock already held; the lock
 * is dropped at the end of this function.
 */
static void _opp_table_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
	struct opp_device *opp_dev, *temp;

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	/* All OPPs must already be gone before the table itself dies. */
	WARN_ON(!list_empty(&opp_table->opp_list));

	list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
		/*
		 * The OPP table is getting removed, drop the performance state
		 * constraints.
		 */
		if (opp_table->genpd_performance_state)
			dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0);

		_remove_opp_dev(opp_dev, opp_table);
	}

	mutex_destroy(&opp_table->lock);
	list_del(&opp_table->node);
	kfree(opp_table);

	/* Acquired by kref_put_mutex() in dev_pm_opp_put_opp_table(). */
	mutex_unlock(&opp_table_lock);
}
/*
 * Drop every statically created OPP (opp->dynamic == false) from the table
 * and mark the table as no longer holding parsed static OPPs.
 */
void _opp_remove_all_static(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp, *tmp;

	/* _safe variant: dev_pm_opp_put() may free and unlink the entry. */
	list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
		if (!opp->dynamic)
			dev_pm_opp_put(opp);
	}

	opp_table->parsed_static_opps = false;
}
/*
 * Release of the static-OPP-list reference. Invoked by kref_put_mutex()
 * from _put_opp_list_kref() with opp_table_lock held; the lock is dropped
 * here after the static OPPs have been removed.
 */
static void _opp_table_list_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table,
						   list_kref);

	_opp_remove_all_static(opp_table);

	/* Acquired by kref_put_mutex() in _put_opp_list_kref(). */
	mutex_unlock(&opp_table_lock);
}
/* Drop a reference on the table's static OPP list. */
void _put_opp_list_kref(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release,
		       &opp_table_lock);
}
/*
 * dev_pm_opp_put_opp_table() - drop a reference on an OPP table
 * @opp_table: table whose reference is being dropped
 *
 * When the last reference goes away, _opp_table_kref_release() frees the
 * table under opp_table_lock.
 */
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);
/*
 * Free an OPP allocated via _opp_allocate() that was never (successfully)
 * added to a table — see the error path in _opp_add_v1().
 */
void _opp_free(struct dev_pm_opp *opp)
{
	kfree(opp);
}
/*
 * Final release of an OPP. Invoked by kref_put_mutex() from dev_pm_opp_put()
 * with the owning table's lock held; the lock is dropped at the end.
 */
static void _opp_kref_release(struct kref *kref)
{
	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
	struct opp_table *opp_table = opp->opp_table;

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
	opp_debug_remove_one(opp);
	list_del(&opp->node);
	kfree(opp);

	/* Acquired by kref_put_mutex() in dev_pm_opp_put(). */
	mutex_unlock(&opp_table->lock);
}
/* Take an additional reference on an OPP. */
void dev_pm_opp_get(struct dev_pm_opp *opp)
{
	kref_get(&opp->kref);
}
/*
 * Drop a reference on an OPP; the last put frees it via _opp_kref_release()
 * under the owning table's lock.
 */
void dev_pm_opp_put(struct dev_pm_opp *opp)
{
	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);
/**
 * dev_pm_opp_remove() - Remove an OPP from OPP table
 * @dev:	device for which we do this operation
 * @freq:	OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp table.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct opp_table *opp_table;
	bool found = false;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	/* Search for the first OPP whose rate matches @freq exactly. */
	mutex_lock(&opp_table->lock);
	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->rate == freq) {
			found = true;
			break;
		}
	}
	mutex_unlock(&opp_table->lock);

	if (found) {
		dev_pm_opp_put(opp);

		/* Drop the reference taken by dev_pm_opp_add() */
		dev_pm_opp_put_opp_table(opp_table);
	} else {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
	}

	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
  846. struct dev_pm_opp *_opp_allocate(struct opp_table *table)
  847. {
  848. struct dev_pm_opp *opp;
  849. int count, supply_size;
  850. /* Allocate space for at least one supply */
  851. count = table->regulator_count ? table->regulator_count : 1;
  852. supply_size = sizeof(*opp->supplies) * count;
  853. /* allocate new OPP node and supplies structures */
  854. opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
  855. if (!opp)
  856. return NULL;
  857. /* Put the supplies at the end of the OPP structure as an empty array */
  858. opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
  859. INIT_LIST_HEAD(&opp->node);
  860. return opp;
  861. }
  862. static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
  863. struct opp_table *opp_table)
  864. {
  865. struct regulator *reg;
  866. int i;
  867. for (i = 0; i < opp_table->regulator_count; i++) {
  868. reg = opp_table->regulators[i];
  869. if (!regulator_is_supported_voltage(reg,
  870. opp->supplies[i].u_volt_min,
  871. opp->supplies[i].u_volt_max)) {
  872. pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
  873. __func__, opp->supplies[i].u_volt_min,
  874. opp->supplies[i].u_volt_max);
  875. return false;
  876. }
  877. }
  878. return true;
  879. }
/*
 * _opp_is_duplicate() - find the insertion point for @new_opp
 * @dev: device, used only for the duplicate warning
 * @new_opp: OPP about to be inserted
 * @opp_table: table whose rate-ordered list is scanned
 * @head: out-parameter; on success points at the node after which the new
 *	  OPP should be list_add()ed
 *
 * Returns 0 when no OPP with the same rate exists; -EBUSY when an
 * identical (same rate and voltage, available) OPP exists; -EEXIST for a
 * same-rate OPP that differs in voltage or availability.
 */
static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
			     struct opp_table *opp_table,
			     struct list_head **head)
{
	struct dev_pm_opp *opp;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			*head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			return 0;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->supplies[0].u_volt,
			 opp->available, new_opp->rate,
			 new_opp->supplies[0].u_volt, new_opp->available);

		/* Should we compare voltages for all regulators here ? */
		return opp->available &&
		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
	}

	return 0;
}
/*
 * Returns:
 * 0: On success. And appropriate error message for duplicate OPPs.
 * -EBUSY: For OPP with same freq/volt and is available. The callers of
 *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
 *  sure we don't print error messages unnecessarily if different parts of
 *  kernel try to initialize the OPP table.
 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
 *  should be considered an error by the callers of _opp_add().
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table, bool rate_not_available)
{
	struct list_head *head;
	int ret;

	mutex_lock(&opp_table->lock);
	head = &opp_table->opp_list;

	/* Rate is known: keep the list rate-ordered and reject duplicates. */
	if (likely(!rate_not_available)) {
		ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
		if (ret) {
			mutex_unlock(&opp_table->lock);
			return ret;
		}
	}

	list_add(&new_opp->node, head);
	mutex_unlock(&opp_table->lock);

	new_opp->opp_table = opp_table;
	kref_init(&new_opp->kref);

	/* Debugfs registration failure is logged but deliberately not fatal. */
	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	/* Keep the OPP, but mark it unusable if regulators can't supply it. */
	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}
/**
 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
 * @opp_table:	OPP table
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 * @dynamic:	Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
		unsigned long freq, long u_volt, bool dynamic)
{
	struct dev_pm_opp *new_opp;
	unsigned long tol;
	int ret;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return -ENOMEM;

	/* populate the opp table */
	new_opp->rate = freq;
	/* Derive min/max from the table's percentage voltage tolerance. */
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table, false);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_opp;
	}

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	/* The OPP was never added to the list, so a plain kfree suffices. */
	_opp_free(new_opp);

	return ret;
}
/**
 * dev_pm_opp_set_supported_hw() - Set supported platforms
 * @dev: Device for which supported-hw has to be set.
 * @versions: Array of hierarchy of versions to match.
 * @count: Number of elements in the array.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the hierarchy of versions it supports. OPP layer will then enable
 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
 * property.
 *
 * Return: opp_table on success, ERR_PTR(-ENOMEM) on allocation failure.
 * Must be balanced with dev_pm_opp_put_supported_hw().
 */
struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
			const u32 *versions, unsigned int count)
{
	struct opp_table *opp_table;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Another CPU that shares the OPP table has set the property ? */
	if (opp_table->supported_hw)
		return opp_table;

	/* Keep a private copy of the caller's versions array. */
	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
					  GFP_KERNEL);
	if (!opp_table->supported_hw) {
		dev_pm_opp_put_opp_table(opp_table);
		return ERR_PTR(-ENOMEM);
	}

	opp_table->supported_hw_count = count;

	return opp_table;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
/**
 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
 * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
 *
 * This is required only for the V2 bindings, and is called for a matching
 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
 * will not be freed.
 */
void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
{
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	kfree(opp_table->supported_hw);
	opp_table->supported_hw = NULL;
	opp_table->supported_hw_count = 0;

	/* Balances the get in dev_pm_opp_set_supported_hw(). */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
/**
 * dev_pm_opp_set_prop_name() - Set prop-extn name
 * @dev: Device for which the prop-name has to be set.
 * @name: name to postfix to properties.
 *
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the extn to be used for certain property names. The properties to
 * which the extension will apply are opp-microvolt and opp-microamp. OPP core
 * should postfix the property name with -<name> while looking for them.
 *
 * Return: opp_table on success, ERR_PTR(-ENOMEM) on allocation failure.
 * Must be balanced with dev_pm_opp_put_prop_name().
 */
struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
	struct opp_table *opp_table;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Another CPU that shares the OPP table has set the property ? */
	if (opp_table->prop_name)
		return opp_table;

	/* Keep a private copy of the caller's name string. */
	opp_table->prop_name = kstrdup(name, GFP_KERNEL);
	if (!opp_table->prop_name) {
		dev_pm_opp_put_opp_table(opp_table);
		return ERR_PTR(-ENOMEM);
	}

	return opp_table;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
/**
 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
 * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
 *
 * This is required only for the V2 bindings, and is called for a matching
 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
 * will not be freed.
 */
void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
{
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	kfree(opp_table->prop_name);
	opp_table->prop_name = NULL;

	/* Balances the get in dev_pm_opp_set_prop_name(). */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
/*
 * Allocate the dev_pm_set_opp_data block handed to custom set_opp()
 * helpers. The old_opp and new_opp supply arrays (regulator_count entries
 * each) are carved out of the same allocation, right after the struct.
 * Returns 0 on success, -EINVAL if no regulators are set, -ENOMEM on
 * allocation failure.
 */
static int _allocate_set_opp_data(struct opp_table *opp_table)
{
	struct dev_pm_set_opp_data *data;
	int len, count = opp_table->regulator_count;

	/* Only meaningful once dev_pm_opp_set_regulators() has run. */
	if (WARN_ON(!count))
		return -EINVAL;

	/* space for set_opp_data */
	len = sizeof(*data);

	/* space for old_opp.supplies and new_opp.supplies */
	len += 2 * sizeof(struct dev_pm_opp_supply) * count;

	data = kzalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Both supply arrays live in the tail of the single allocation. */
	data->old_opp.supplies = (void *)(data + 1);
	data->new_opp.supplies = data->old_opp.supplies + count;

	opp_table->set_opp_data = data;

	return 0;
}
/* Free the block allocated by _allocate_set_opp_data(), if any. */
static void _free_set_opp_data(struct opp_table *opp_table)
{
	kfree(opp_table->set_opp_data);
	opp_table->set_opp_data = NULL;
}
/**
 * dev_pm_opp_set_regulators() - Set regulator names for the device
 * @dev: Device for which regulator name is being set.
 * @names: Array of pointers to the names of the regulator.
 * @count: Number of regulators.
 *
 * In order to support OPP switching, OPP layer needs to know the name of the
 * device's regulators, as the core would be required to switch voltages as
 * well.
 *
 * This must be called before any OPPs are initialized for the device.
 *
 * Return: opp_table on success, ERR_PTR on failure (-EBUSY if OPPs exist
 * already, -ENOMEM on allocation failure, or the regulator_get_optional()
 * error). Must be balanced with dev_pm_opp_put_regulators().
 */
struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
					    const char * const names[],
					    unsigned int count)
{
	struct opp_table *opp_table;
	struct regulator *reg;
	int ret, i;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Another CPU that shares the OPP table has set the regulators ? */
	if (opp_table->regulators)
		return opp_table;

	opp_table->regulators = kmalloc_array(count,
					      sizeof(*opp_table->regulators),
					      GFP_KERNEL);
	if (!opp_table->regulators) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < count; i++) {
		reg = regulator_get_optional(dev, names[i]);
		if (IS_ERR(reg)) {
			ret = PTR_ERR(reg);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "%s: no regulator (%s) found: %d\n",
					__func__, names[i], ret);
			goto free_regulators;
		}

		opp_table->regulators[i] = reg;
	}

	opp_table->regulator_count = count;

	/* Allocate block only once to pass to set_opp() routines */
	ret = _allocate_set_opp_data(opp_table);
	if (ret)
		goto free_regulators;

	return opp_table;

free_regulators:
	/* Release only the regulators acquired so far (indices 0..i-1). */
	while (i != 0)
		regulator_put(opp_table->regulators[--i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = 0;
err:
	dev_pm_opp_put_opp_table(opp_table);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
/**
 * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
 * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
 */
void dev_pm_opp_put_regulators(struct opp_table *opp_table)
{
	int i;

	/* Nothing to undo if the regulators were never set. */
	if (!opp_table->regulators)
		goto put_opp_table;

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Release in reverse order of acquisition. */
	for (i = opp_table->regulator_count - 1; i >= 0; i--)
		regulator_put(opp_table->regulators[i]);

	_free_set_opp_data(opp_table);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = 0;

put_opp_table:
	/* Balances the get in dev_pm_opp_set_regulators(). */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
/**
 * dev_pm_opp_set_clkname() - Set clk name for the device
 * @dev: Device for which clk name is being set.
 * @name: Clk name.
 *
 * In order to support OPP switching, OPP layer needs to get pointer to the
 * clock for the device. Simple cases work fine without using this routine (i.e.
 * by passing connection-id as NULL), but for a device with multiple clocks
 * available, the OPP core needs to know the exact name of the clk to use.
 *
 * This must be called before any OPPs are initialized for the device.
 *
 * Return: opp_table on success, ERR_PTR on failure (-EBUSY if OPPs exist
 * already, or the clk_get() error). Must be balanced with
 * dev_pm_opp_put_clkname().
 */
struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Already have default clk set, free it */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, name);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		if (ret != -EPROBE_DEFER) {
			dev_err(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
		}
		goto err;
	}

	return opp_table;

err:
	dev_pm_opp_put_opp_table(opp_table);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
/**
 * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
 * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
 */
void dev_pm_opp_put_clkname(struct opp_table *opp_table)
{
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	clk_put(opp_table->clk);
	/* Poison the pointer so later IS_ERR() checks see no clock. */
	opp_table->clk = ERR_PTR(-EINVAL);

	/* Balances the get in dev_pm_opp_set_clkname(). */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
/**
 * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
 * @dev: Device for which the helper is getting registered.
 * @set_opp: Custom set OPP helper.
 *
 * This is useful to support complex platforms (like platforms with multiple
 * regulators per device), instead of the generic OPP set rate helper.
 *
 * This must be called before any OPPs are initialized for the device.
 *
 * Return: opp_table on success, ERR_PTR(-EINVAL) for a NULL helper,
 * ERR_PTR(-ENOMEM) or ERR_PTR(-EBUSY) on failure. Must be balanced with
 * dev_pm_opp_unregister_set_opp_helper().
 */
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
			int (*set_opp)(struct dev_pm_set_opp_data *data))
{
	struct opp_table *opp_table;

	if (!set_opp)
		return ERR_PTR(-EINVAL);

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		dev_pm_opp_put_opp_table(opp_table);
		return ERR_PTR(-EBUSY);
	}

	/* Another CPU that shares the OPP table has set the helper ? */
	if (!opp_table->set_opp)
		opp_table->set_opp = set_opp;

	return opp_table;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
/**
 * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
 *					   set_opp helper
 * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
 *
 * Release resources blocked for platform specific set_opp helper.
 */
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
{
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	opp_table->set_opp = NULL;

	/* Balances the get in dev_pm_opp_register_set_opp_helper(). */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
/**
 * dev_pm_opp_add()  - Add an OPP table from a table definitions
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return -ENOMEM;

	/*
	 * On success the table reference taken above is kept; it is dropped
	 * later by dev_pm_opp_remove().
	 */
	ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP, opp_{enable,disable} share a common logic
 * which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	mutex_lock(&opp_table->lock);

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}

	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;

	opp->available = availability_req;

	/* Pin the OPP so it survives until the notifiers have run. */
	dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);

	/* Notify the change of the OPP availability */
	if (availability_req)
		blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
					     opp);
	else
		blocking_notifier_call_chain(&opp_table->head,
					     OPP_EVENT_DISABLE, opp);

	dev_pm_opp_put(opp);
	/* Lock already dropped above; skip the unlock label. */
	goto put_table;

unlock:
	mutex_unlock(&opp_table->lock);
put_table:
	dev_pm_opp_put_opp_table(opp_table);
	return r;
}
/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used for users an OPP available
 * after being temporarily made unavailable with dev_pm_opp_disable.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
  1439. /**
  1440. * dev_pm_opp_register_notifier() - Register OPP notifier for the device
  1441. * @dev: Device for which notifier needs to be registered
  1442. * @nb: Notifier block to be registered
  1443. *
  1444. * Return: 0 on success or a negative error value.
  1445. */
  1446. int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
  1447. {
  1448. struct opp_table *opp_table;
  1449. int ret;
  1450. opp_table = _find_opp_table(dev);
  1451. if (IS_ERR(opp_table))
  1452. return PTR_ERR(opp_table);
  1453. ret = blocking_notifier_chain_register(&opp_table->head, nb);
  1454. dev_pm_opp_put_opp_table(opp_table);
  1455. return ret;
  1456. }
  1457. EXPORT_SYMBOL(dev_pm_opp_register_notifier);
  1458. /**
  1459. * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
  1460. * @dev: Device for which notifier needs to be unregistered
  1461. * @nb: Notifier block to be unregistered
  1462. *
  1463. * Return: 0 on success or a negative error value.
  1464. */
  1465. int dev_pm_opp_unregister_notifier(struct device *dev,
  1466. struct notifier_block *nb)
  1467. {
  1468. struct opp_table *opp_table;
  1469. int ret;
  1470. opp_table = _find_opp_table(dev);
  1471. if (IS_ERR(opp_table))
  1472. return PTR_ERR(opp_table);
  1473. ret = blocking_notifier_chain_unregister(&opp_table->head, nb);
  1474. dev_pm_opp_put_opp_table(opp_table);
  1475. return ret;
  1476. }
  1477. EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
/*
 * Find the OPP table for @dev and tear it down: release the static OPP
 * list, then drop both the lookup reference and the reference held since
 * the table was created.
 */
void _dev_pm_opp_find_and_remove_table(struct device *dev)
{
	struct opp_table *opp_table;

	/* Check for existing table for 'dev' */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);

		/* A missing table (-ENODEV) is legitimate; don't warn then. */
		if (error != -ENODEV)
			WARN(1, "%s: opp_table: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);

		return;
	}

	_put_opp_list_kref(opp_table);

	/* Drop reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);

	/* Drop reference taken while the OPP table was added */
	dev_pm_opp_put_opp_table(opp_table);
}
/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev:	device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	_dev_pm_opp_find_and_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);