core.c

  1. /*
  2. * Generic OPP Interface
  3. *
  4. * Copyright (C) 2009-2010 Texas Instruments Incorporated.
  5. * Nishanth Menon
  6. * Romit Dasgupta
  7. * Kevin Hilman
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/clk.h>
  15. #include <linux/errno.h>
  16. #include <linux/err.h>
  17. #include <linux/slab.h>
  18. #include <linux/device.h>
  19. #include <linux/export.h>
  20. #include <linux/pm_domain.h>
  21. #include <linux/regulator/consumer.h>
  22. #include "opp.h"
  23. /*
  24. * The root of the list of all opp-tables. All opp_table structures branch off
  25. * from here, with each opp_table containing the list of opps it supports in
  26. * various states of availability.
  27. */
  28. LIST_HEAD(opp_tables);
  29. /* Lock to allow exclusive modification to the device and opp lists */
  30. DEFINE_MUTEX(opp_table_lock);
  31. static struct opp_device *_find_opp_dev(const struct device *dev,
  32. struct opp_table *opp_table)
  33. {
  34. struct opp_device *opp_dev;
  35. list_for_each_entry(opp_dev, &opp_table->dev_list, node)
  36. if (opp_dev->dev == dev)
  37. return opp_dev;
  38. return NULL;
  39. }
  40. static struct opp_table *_find_opp_table_unlocked(struct device *dev)
  41. {
  42. struct opp_table *opp_table;
  43. bool found;
  44. list_for_each_entry(opp_table, &opp_tables, node) {
  45. mutex_lock(&opp_table->lock);
  46. found = !!_find_opp_dev(dev, opp_table);
  47. mutex_unlock(&opp_table->lock);
  48. if (found) {
  49. _get_opp_table_kref(opp_table);
  50. return opp_table;
  51. }
  52. }
  53. return ERR_PTR(-ENODEV);
  54. }
  55. /**
  56. * _find_opp_table() - find opp_table struct using device pointer
  57. * @dev: device pointer used to lookup OPP table
  58. *
  59. * Search OPP table for one containing matching device.
  60. *
  61. * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
  62. * -EINVAL based on type of error.
  63. *
  64. * The callers must call dev_pm_opp_put_opp_table() after the table is used.
  65. */
  66. struct opp_table *_find_opp_table(struct device *dev)
  67. {
  68. struct opp_table *opp_table;
  69. if (IS_ERR_OR_NULL(dev)) {
  70. pr_err("%s: Invalid parameters\n", __func__);
  71. return ERR_PTR(-EINVAL);
  72. }
  73. mutex_lock(&opp_table_lock);
  74. opp_table = _find_opp_table_unlocked(dev);
  75. mutex_unlock(&opp_table_lock);
  76. return opp_table;
  77. }
  78. /**
  79. * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
  80. * @opp: opp for which the voltage has to be returned
  81. *
  82. * Return: voltage in micro volt corresponding to the opp, else
  83. * return 0
  84. *
  85. * This is useful only for devices with single power supply.
  86. */
  87. unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
  88. {
  89. if (IS_ERR_OR_NULL(opp)) {
  90. pr_err("%s: Invalid parameters\n", __func__);
  91. return 0;
  92. }
  93. return opp->supplies[0].u_volt;
  94. }
  95. EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
  96. /**
  97. * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  98. * @opp: opp for which the frequency has to be returned
  99. *
  100. * Return: frequency in hertz corresponding to the opp, else
  101. * return 0
  102. */
  103. unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
  104. {
  105. if (IS_ERR_OR_NULL(opp) || !opp->available) {
  106. pr_err("%s: Invalid parameters\n", __func__);
  107. return 0;
  108. }
  109. return opp->rate;
  110. }
  111. EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
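/*
 * Editor's note: illustrative sketch, not part of the OPP core. It shows how a
 * consumer driver might pair dev_pm_opp_find_freq_ceil() with the accessors
 * above; the function name and calling context are hypothetical.
 */
static int example_read_opp(struct device *dev, unsigned long target_hz)
{
	struct dev_pm_opp *opp;
	unsigned long freq = target_hz;

	/* Lowest available OPP at or above target_hz; takes a reference */
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	dev_info(dev, "OPP: %lu Hz at %lu uV\n", dev_pm_opp_get_freq(opp),
		 dev_pm_opp_get_voltage(opp));

	dev_pm_opp_put(opp);
	return 0;
}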
  112. /**
  113. * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
  114. * @opp: opp for which turbo mode is being verified
  115. *
  116. * Turbo OPPs are not for normal use, and can be enabled (under certain
  117. * conditions) for short durations of time to finish high-throughput work
  118. * quickly. Running on them for longer times may overheat the chip.
  119. *
  120. * Return: true if opp is turbo opp, else false.
  121. */
  122. bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
  123. {
  124. if (IS_ERR_OR_NULL(opp) || !opp->available) {
  125. pr_err("%s: Invalid parameters\n", __func__);
  126. return false;
  127. }
  128. return opp->turbo;
  129. }
  130. EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
  131. /**
  132. * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
  133. * @dev: device for which we do this operation
  134. *
  135. * Return: This function returns the max clock latency in nanoseconds.
  136. */
  137. unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
  138. {
  139. struct opp_table *opp_table;
  140. unsigned long clock_latency_ns;
  141. opp_table = _find_opp_table(dev);
  142. if (IS_ERR(opp_table))
  143. return 0;
  144. clock_latency_ns = opp_table->clock_latency_ns_max;
  145. dev_pm_opp_put_opp_table(opp_table);
  146. return clock_latency_ns;
  147. }
  148. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
  149. /**
  150. * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
  151. * @dev: device for which we do this operation
  152. *
  153. * Return: This function returns the max voltage latency in nanoseconds.
  154. */
  155. unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
  156. {
  157. struct opp_table *opp_table;
  158. struct dev_pm_opp *opp;
  159. struct regulator *reg;
  160. unsigned long latency_ns = 0;
  161. int ret, i, count;
  162. struct {
  163. unsigned long min;
  164. unsigned long max;
  165. } *uV;
  166. opp_table = _find_opp_table(dev);
  167. if (IS_ERR(opp_table))
  168. return 0;
  169. count = opp_table->regulator_count;
  170. /* Regulator may not be required for the device */
  171. if (!count)
  172. goto put_opp_table;
  173. uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
  174. if (!uV)
  175. goto put_opp_table;
  176. mutex_lock(&opp_table->lock);
  177. for (i = 0; i < count; i++) {
  178. uV[i].min = ~0;
  179. uV[i].max = 0;
  180. list_for_each_entry(opp, &opp_table->opp_list, node) {
  181. if (!opp->available)
  182. continue;
  183. if (opp->supplies[i].u_volt_min < uV[i].min)
  184. uV[i].min = opp->supplies[i].u_volt_min;
  185. if (opp->supplies[i].u_volt_max > uV[i].max)
  186. uV[i].max = opp->supplies[i].u_volt_max;
  187. }
  188. }
  189. mutex_unlock(&opp_table->lock);
  190. /*
  191. * The caller needs to ensure that opp_table (and hence the regulator)
  192. * isn't freed, while we are executing this routine.
  193. */
  194. for (i = 0; i < count; i++) {
  195. reg = opp_table->regulators[i];
  196. ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
  197. if (ret > 0)
  198. latency_ns += ret * 1000;
  199. }
  200. kfree(uV);
  201. put_opp_table:
  202. dev_pm_opp_put_opp_table(opp_table);
  203. return latency_ns;
  204. }
  205. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
  206. /**
  207. * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
  208. * nanoseconds
  209. * @dev: device for which we do this operation
  210. *
  211. * Return: This function returns the max transition latency, in nanoseconds, to
  212. * switch from one OPP to other.
  213. */
  214. unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
  215. {
  216. return dev_pm_opp_get_max_volt_latency(dev) +
  217. dev_pm_opp_get_max_clock_latency(dev);
  218. }
  219. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
  220. /**
  221. * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
  222. * @dev: device for which we do this operation
  223. *
  224. * Return: This function returns the frequency of the OPP marked as suspend_opp
  225. * if one is available, else returns 0;
  226. */
  227. unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
  228. {
  229. struct opp_table *opp_table;
  230. unsigned long freq = 0;
  231. opp_table = _find_opp_table(dev);
  232. if (IS_ERR(opp_table))
  233. return 0;
  234. if (opp_table->suspend_opp && opp_table->suspend_opp->available)
  235. freq = dev_pm_opp_get_freq(opp_table->suspend_opp);
  236. dev_pm_opp_put_opp_table(opp_table);
  237. return freq;
  238. }
  239. EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
  240. int _get_opp_count(struct opp_table *opp_table)
  241. {
  242. struct dev_pm_opp *opp;
  243. int count = 0;
  244. mutex_lock(&opp_table->lock);
  245. list_for_each_entry(opp, &opp_table->opp_list, node) {
  246. if (opp->available)
  247. count++;
  248. }
  249. mutex_unlock(&opp_table->lock);
  250. return count;
  251. }
  252. /**
  253. * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
  254. * @dev: device for which we do this operation
  255. *
  256. * Return: This function returns the number of available opps if there are any,
  257. * else returns 0 if none or the corresponding error value.
  258. */
  259. int dev_pm_opp_get_opp_count(struct device *dev)
  260. {
  261. struct opp_table *opp_table;
  262. int count;
  263. opp_table = _find_opp_table(dev);
  264. if (IS_ERR(opp_table)) {
  265. count = PTR_ERR(opp_table);
  266. dev_dbg(dev, "%s: OPP table not found (%d)\n",
  267. __func__, count);
  268. return 0;
  269. }
  270. count = _get_opp_count(opp_table);
  271. dev_pm_opp_put_opp_table(opp_table);
  272. return count;
  273. }
  274. EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
  275. /**
  276. * dev_pm_opp_find_freq_exact() - search for an exact frequency
  277. * @dev: device for which we do this operation
  278. * @freq: frequency to search for
  279. * @available: true/false - match for available opp
  280. *
  281. * Return: Searches for exact match in the opp table and returns pointer to the
  282. * matching opp if found, else returns ERR_PTR in case of error and should
  283. * be handled using IS_ERR. Error return values can be:
  284. * EINVAL: for bad pointer
  285. * ERANGE: no match found for search
  286. * ENODEV: if device not found in list of registered devices
  287. *
  288. * Note: available is a modifier for the search. If available=true, then the
  289. * match is for exact matching frequency and is available in the stored OPP
  290. * table. If false, the match is for exact frequency which is not available.
  291. *
  292. * This provides a mechanism to enable an opp which is not available currently
  293. * or the opposite as well.
  294. *
  295. * The callers are required to call dev_pm_opp_put() for the returned OPP after
  296. * use.
  297. */
  298. struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
  299. unsigned long freq,
  300. bool available)
  301. {
  302. struct opp_table *opp_table;
  303. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  304. opp_table = _find_opp_table(dev);
  305. if (IS_ERR(opp_table)) {
  306. int r = PTR_ERR(opp_table);
  307. dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
  308. return ERR_PTR(r);
  309. }
  310. mutex_lock(&opp_table->lock);
  311. list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
  312. if (temp_opp->available == available &&
  313. temp_opp->rate == freq) {
  314. opp = temp_opp;
  315. /* Increment the reference count of OPP */
  316. dev_pm_opp_get(opp);
  317. break;
  318. }
  319. }
  320. mutex_unlock(&opp_table->lock);
  321. dev_pm_opp_put_opp_table(opp_table);
  322. return opp;
  323. }
  324. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
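/*
 * Editor's note: illustrative sketch, not part of the OPP core. As the
 * kerneldoc above notes, available=false matches an OPP that exists but is
 * currently unavailable, which allows re-enabling it; the function name is
 * hypothetical.
 */
static int example_reenable_opp(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;

	/* Confirm a currently-disabled OPP exists at exactly 'freq' */
	opp = dev_pm_opp_find_freq_exact(dev, freq, false);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	dev_pm_opp_put(opp);

	return dev_pm_opp_enable(dev, freq);
}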
  325. static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
  326. unsigned long *freq)
  327. {
  328. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  329. mutex_lock(&opp_table->lock);
  330. list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
  331. if (temp_opp->available && temp_opp->rate >= *freq) {
  332. opp = temp_opp;
  333. *freq = opp->rate;
  334. /* Increment the reference count of OPP */
  335. dev_pm_opp_get(opp);
  336. break;
  337. }
  338. }
  339. mutex_unlock(&opp_table->lock);
  340. return opp;
  341. }
  342. /**
  343. * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
  344. * @dev: device for which we do this operation
  345. * @freq: Start frequency
  346. *
  347. * Search for the matching ceil *available* OPP from a starting freq
  348. * for a device.
  349. *
  350. * Return: matching *opp and refreshes *freq accordingly, else returns
  351. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  352. * values can be:
  353. * EINVAL: for bad pointer
  354. * ERANGE: no match found for search
  355. * ENODEV: if device not found in list of registered devices
  356. *
  357. * The callers are required to call dev_pm_opp_put() for the returned OPP after
  358. * use.
  359. */
  360. struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
  361. unsigned long *freq)
  362. {
  363. struct opp_table *opp_table;
  364. struct dev_pm_opp *opp;
  365. if (!dev || !freq) {
  366. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  367. return ERR_PTR(-EINVAL);
  368. }
  369. opp_table = _find_opp_table(dev);
  370. if (IS_ERR(opp_table))
  371. return ERR_CAST(opp_table);
  372. opp = _find_freq_ceil(opp_table, freq);
  373. dev_pm_opp_put_opp_table(opp_table);
  374. return opp;
  375. }
  376. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
  377. /**
  378. * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
  379. * @dev: device for which we do this operation
  380. * @freq: Start frequency
  381. *
  382. * Search for the matching floor *available* OPP from a starting freq
  383. * for a device.
  384. *
  385. * Return: matching *opp and refreshes *freq accordingly, else returns
  386. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  387. * values can be:
  388. * EINVAL: for bad pointer
  389. * ERANGE: no match found for search
  390. * ENODEV: if device not found in list of registered devices
  391. *
  392. * The callers are required to call dev_pm_opp_put() for the returned OPP after
  393. * use.
  394. */
  395. struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
  396. unsigned long *freq)
  397. {
  398. struct opp_table *opp_table;
  399. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  400. if (!dev || !freq) {
  401. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  402. return ERR_PTR(-EINVAL);
  403. }
  404. opp_table = _find_opp_table(dev);
  405. if (IS_ERR(opp_table))
  406. return ERR_CAST(opp_table);
  407. mutex_lock(&opp_table->lock);
  408. list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
  409. if (temp_opp->available) {
  410. /* go to the next node, before choosing prev */
  411. if (temp_opp->rate > *freq)
  412. break;
  413. else
  414. opp = temp_opp;
  415. }
  416. }
  417. /* Increment the reference count of OPP */
  418. if (!IS_ERR(opp))
  419. dev_pm_opp_get(opp);
  420. mutex_unlock(&opp_table->lock);
  421. dev_pm_opp_put_opp_table(opp_table);
  422. if (!IS_ERR(opp))
  423. *freq = opp->rate;
  424. return opp;
  425. }
  426. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
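/*
 * Editor's note: illustrative sketch, not part of the OPP core. Passing
 * ULONG_MAX to the floor helper is a common way to fetch the highest available
 * OPP; the function name is hypothetical.
 */
static unsigned long example_max_freq(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned long freq = ULONG_MAX;

	opp = dev_pm_opp_find_freq_floor(dev, &freq);
	if (IS_ERR(opp))
		return 0;

	dev_pm_opp_put(opp);

	/* 'freq' now holds the rate of the highest available OPP */
	return freq;
}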
  427. static int _set_opp_voltage(struct device *dev, struct regulator *reg,
  428. struct dev_pm_opp_supply *supply)
  429. {
  430. int ret;
  431. /* Regulator not available for device */
  432. if (IS_ERR(reg)) {
  433. dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
  434. PTR_ERR(reg));
  435. return 0;
  436. }
  437. dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
  438. supply->u_volt_min, supply->u_volt, supply->u_volt_max);
  439. ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
  440. supply->u_volt, supply->u_volt_max);
  441. if (ret)
  442. dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
  443. __func__, supply->u_volt_min, supply->u_volt,
  444. supply->u_volt_max, ret);
  445. return ret;
  446. }
  447. static inline int
  448. _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
  449. unsigned long old_freq, unsigned long freq)
  450. {
  451. int ret;
  452. ret = clk_set_rate(clk, freq);
  453. if (ret) {
  454. dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
  455. ret);
  456. }
  457. return ret;
  458. }
  459. static inline int
  460. _generic_set_opp_domain(struct device *dev, struct clk *clk,
  461. unsigned long old_freq, unsigned long freq,
  462. unsigned int old_pstate, unsigned int new_pstate)
  463. {
  464. int ret;
  465. /* Scaling up? Scale domain performance state before frequency */
  466. if (freq > old_freq) {
  467. ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
  468. if (ret)
  469. return ret;
  470. }
  471. ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
  472. if (ret)
  473. goto restore_domain_state;
  474. /* Scaling down? Scale domain performance state after frequency */
  475. if (freq < old_freq) {
  476. ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
  477. if (ret)
  478. goto restore_freq;
  479. }
  480. return 0;
  481. restore_freq:
  482. if (_generic_set_opp_clk_only(dev, clk, freq, old_freq))
  483. dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
  484. __func__, old_freq);
  485. restore_domain_state:
  486. if (freq > old_freq)
  487. dev_pm_genpd_set_performance_state(dev, old_pstate);
  488. return ret;
  489. }
  490. static int _generic_set_opp_regulator(const struct opp_table *opp_table,
  491. struct device *dev,
  492. unsigned long old_freq,
  493. unsigned long freq,
  494. struct dev_pm_opp_supply *old_supply,
  495. struct dev_pm_opp_supply *new_supply)
  496. {
  497. struct regulator *reg = opp_table->regulators[0];
  498. int ret;
  499. /* This function only supports a single regulator per device */
  500. if (WARN_ON(opp_table->regulator_count > 1)) {
  501. dev_err(dev, "multiple regulators are not supported\n");
  502. return -EINVAL;
  503. }
  504. /* Scaling up? Scale voltage before frequency */
  505. if (freq >= old_freq) {
  506. ret = _set_opp_voltage(dev, reg, new_supply);
  507. if (ret)
  508. goto restore_voltage;
  509. }
  510. /* Change frequency */
  511. ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
  512. if (ret)
  513. goto restore_voltage;
  514. /* Scaling down? Scale voltage after frequency */
  515. if (freq < old_freq) {
  516. ret = _set_opp_voltage(dev, reg, new_supply);
  517. if (ret)
  518. goto restore_freq;
  519. }
  520. return 0;
  521. restore_freq:
  522. if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
  523. dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
  524. __func__, old_freq);
  525. restore_voltage:
  526. /* This shouldn't harm even if the voltages weren't updated earlier */
  527. if (old_supply)
  528. _set_opp_voltage(dev, reg, old_supply);
  529. return ret;
  530. }
  531. /**
  532. * dev_pm_opp_set_rate() - Configure new OPP based on frequency
  533. * @dev: device for which we do this operation
  534. * @target_freq: frequency to achieve
  535. *
  536. * This configures the power-supplies and clock source to the levels specified
  537. * by the OPP corresponding to the target_freq.
  538. */
  539. int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
  540. {
  541. struct opp_table *opp_table;
  542. unsigned long freq, old_freq;
  543. struct dev_pm_opp *old_opp, *opp;
  544. struct clk *clk;
  545. int ret, size;
  546. if (unlikely(!target_freq)) {
  547. dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
  548. target_freq);
  549. return -EINVAL;
  550. }
  551. opp_table = _find_opp_table(dev);
  552. if (IS_ERR(opp_table)) {
  553. dev_err(dev, "%s: device opp doesn't exist\n", __func__);
  554. return PTR_ERR(opp_table);
  555. }
  556. clk = opp_table->clk;
  557. if (IS_ERR(clk)) {
  558. dev_err(dev, "%s: No clock available for the device\n",
  559. __func__);
  560. ret = PTR_ERR(clk);
  561. goto put_opp_table;
  562. }
  563. freq = clk_round_rate(clk, target_freq);
  564. if ((long)freq <= 0)
  565. freq = target_freq;
  566. old_freq = clk_get_rate(clk);
  567. /* Return early if nothing to do */
  568. if (old_freq == freq) {
  569. dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
  570. __func__, freq);
  571. ret = 0;
  572. goto put_opp_table;
  573. }
  574. old_opp = _find_freq_ceil(opp_table, &old_freq);
  575. if (IS_ERR(old_opp)) {
  576. dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
  577. __func__, old_freq, PTR_ERR(old_opp));
  578. }
  579. opp = _find_freq_ceil(opp_table, &freq);
  580. if (IS_ERR(opp)) {
  581. ret = PTR_ERR(opp);
  582. dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
  583. __func__, freq, ret);
  584. goto put_old_opp;
  585. }
  586. dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
  587. old_freq, freq);
  588. /* Only frequency scaling */
  589. if (!opp_table->regulators) {
  590. /*
  591. * We don't support devices with both regulator and
  592. * domain performance-state for now.
  593. */
  594. if (opp_table->genpd_performance_state)
  595. ret = _generic_set_opp_domain(dev, clk, old_freq, freq,
  596. IS_ERR(old_opp) ? 0 : old_opp->pstate,
  597. opp->pstate);
  598. else
  599. ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
  600. } else if (!opp_table->set_opp) {
  601. ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
  602. IS_ERR(old_opp) ? NULL : old_opp->supplies,
  603. opp->supplies);
  604. } else {
  605. struct dev_pm_set_opp_data *data;
  606. data = opp_table->set_opp_data;
  607. data->regulators = opp_table->regulators;
  608. data->regulator_count = opp_table->regulator_count;
  609. data->clk = clk;
  610. data->dev = dev;
  611. data->old_opp.rate = old_freq;
  612. size = sizeof(*opp->supplies) * opp_table->regulator_count;
  613. if (IS_ERR(old_opp))
  614. memset(data->old_opp.supplies, 0, size);
  615. else
  616. memcpy(data->old_opp.supplies, old_opp->supplies, size);
  617. data->new_opp.rate = freq;
  618. memcpy(data->new_opp.supplies, opp->supplies, size);
  619. ret = opp_table->set_opp(data);
  620. }
  621. dev_pm_opp_put(opp);
  622. put_old_opp:
  623. if (!IS_ERR(old_opp))
  624. dev_pm_opp_put(old_opp);
  625. put_opp_table:
  626. dev_pm_opp_put_opp_table(opp_table);
  627. return ret;
  628. }
  629. EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
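/*
 * Editor's note: illustrative sketch, not part of the OPP core. A scaling
 * driver typically just hands its target frequency to dev_pm_opp_set_rate()
 * and lets the routine above pick the OPP and order the clock and regulator
 * updates; the wrapper below is hypothetical.
 */
static int example_scale(struct device *dev, unsigned long target_hz)
{
	int ret;

	ret = dev_pm_opp_set_rate(dev, target_hz);
	if (ret)
		dev_err(dev, "failed to switch OPP for %lu Hz: %d\n",
			target_hz, ret);

	return ret;
}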
  630. /* OPP-dev Helpers */
  631. static void _remove_opp_dev(struct opp_device *opp_dev,
  632. struct opp_table *opp_table)
  633. {
  634. opp_debug_unregister(opp_dev, opp_table);
  635. list_del(&opp_dev->node);
  636. kfree(opp_dev);
  637. }
  638. struct opp_device *_add_opp_dev(const struct device *dev,
  639. struct opp_table *opp_table)
  640. {
  641. struct opp_device *opp_dev;
  642. int ret;
  643. opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
  644. if (!opp_dev)
  645. return NULL;
  646. /* Initialize opp-dev */
  647. opp_dev->dev = dev;
  648. mutex_lock(&opp_table->lock);
  649. list_add(&opp_dev->node, &opp_table->dev_list);
  650. /* Create debugfs entries for the opp_table */
  651. ret = opp_debug_register(opp_dev, opp_table);
  652. if (ret)
  653. dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
  654. __func__, ret);
  655. mutex_unlock(&opp_table->lock);
  656. return opp_dev;
  657. }
  658. static struct opp_table *_allocate_opp_table(struct device *dev, int index)
  659. {
  660. struct opp_table *opp_table;
  661. struct opp_device *opp_dev;
  662. int ret;
  663. /*
  664. * Allocate a new OPP table. In the infrequent case where a new
  665. * device needs to be added, we pay this penalty.
  666. */
  667. opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
  668. if (!opp_table)
  669. return NULL;
  670. mutex_init(&opp_table->lock);
  671. INIT_LIST_HEAD(&opp_table->dev_list);
  672. opp_dev = _add_opp_dev(dev, opp_table);
  673. if (!opp_dev) {
  674. kfree(opp_table);
  675. return NULL;
  676. }
  677. _of_init_opp_table(opp_table, dev, index);
  678. /* Find clk for the device */
  679. opp_table->clk = clk_get(dev, NULL);
  680. if (IS_ERR(opp_table->clk)) {
  681. ret = PTR_ERR(opp_table->clk);
  682. if (ret != -EPROBE_DEFER)
  683. dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
  684. ret);
  685. }
  686. BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
  687. INIT_LIST_HEAD(&opp_table->opp_list);
  688. kref_init(&opp_table->kref);
  689. /* Secure the device table modification */
  690. list_add(&opp_table->node, &opp_tables);
  691. return opp_table;
  692. }
  693. void _get_opp_table_kref(struct opp_table *opp_table)
  694. {
  695. kref_get(&opp_table->kref);
  696. }
  697. static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
  698. {
  699. struct opp_table *opp_table;
  700. /* Hold our table modification lock here */
  701. mutex_lock(&opp_table_lock);
  702. opp_table = _find_opp_table_unlocked(dev);
  703. if (!IS_ERR(opp_table))
  704. goto unlock;
  705. opp_table = _allocate_opp_table(dev, index);
  706. unlock:
  707. mutex_unlock(&opp_table_lock);
  708. return opp_table;
  709. }
  710. struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
  711. {
  712. return _opp_get_opp_table(dev, 0);
  713. }
  714. EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
  715. struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev,
  716. int index)
  717. {
  718. return _opp_get_opp_table(dev, index);
  719. }
  720. static void _opp_table_kref_release(struct kref *kref)
  721. {
  722. struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
  723. struct opp_device *opp_dev;
  724. /* Release clk */
  725. if (!IS_ERR(opp_table->clk))
  726. clk_put(opp_table->clk);
  727. /*
  728. * No need to take opp_table->lock here as we are guaranteed that no
  729. * references to the OPP table are taken at this point.
  730. */
  731. opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
  732. node);
  733. _remove_opp_dev(opp_dev, opp_table);
  734. /* dev_list must be empty now */
  735. WARN_ON(!list_empty(&opp_table->dev_list));
  736. mutex_destroy(&opp_table->lock);
  737. list_del(&opp_table->node);
  738. kfree(opp_table);
  739. mutex_unlock(&opp_table_lock);
  740. }
  741. void _opp_remove_all_static(struct opp_table *opp_table)
  742. {
  743. struct dev_pm_opp *opp, *tmp;
  744. list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
  745. if (!opp->dynamic)
  746. dev_pm_opp_put(opp);
  747. }
  748. opp_table->parsed_static_opps = false;
  749. }
  750. static void _opp_table_list_kref_release(struct kref *kref)
  751. {
  752. struct opp_table *opp_table = container_of(kref, struct opp_table,
  753. list_kref);
  754. _opp_remove_all_static(opp_table);
  755. mutex_unlock(&opp_table_lock);
  756. }
  757. void _put_opp_list_kref(struct opp_table *opp_table)
  758. {
  759. kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release,
  760. &opp_table_lock);
  761. }
  762. void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
  763. {
  764. kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
  765. &opp_table_lock);
  766. }
  767. EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);
  768. void _opp_free(struct dev_pm_opp *opp)
  769. {
  770. kfree(opp);
  771. }
  772. static void _opp_kref_release(struct kref *kref)
  773. {
  774. struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
  775. struct opp_table *opp_table = opp->opp_table;
  776. /*
  777. * Notify the changes in the availability of the operable
  778. * frequency/voltage list.
  779. */
  780. blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
  781. opp_debug_remove_one(opp);
  782. list_del(&opp->node);
  783. kfree(opp);
  784. mutex_unlock(&opp_table->lock);
  785. }
  786. void dev_pm_opp_get(struct dev_pm_opp *opp)
  787. {
  788. kref_get(&opp->kref);
  789. }
  790. void dev_pm_opp_put(struct dev_pm_opp *opp)
  791. {
  792. kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
  793. }
  794. EXPORT_SYMBOL_GPL(dev_pm_opp_put);
  795. /**
  796. * dev_pm_opp_remove() - Remove an OPP from OPP table
  797. * @dev: device for which we do this operation
  798. * @freq: OPP to remove with matching 'freq'
  799. *
  800. * This function removes an opp from the opp table.
  801. */
  802. void dev_pm_opp_remove(struct device *dev, unsigned long freq)
  803. {
  804. struct dev_pm_opp *opp;
  805. struct opp_table *opp_table;
  806. bool found = false;
  807. opp_table = _find_opp_table(dev);
  808. if (IS_ERR(opp_table))
  809. return;
  810. mutex_lock(&opp_table->lock);
  811. list_for_each_entry(opp, &opp_table->opp_list, node) {
  812. if (opp->rate == freq) {
  813. found = true;
  814. break;
  815. }
  816. }
  817. mutex_unlock(&opp_table->lock);
  818. if (found) {
  819. dev_pm_opp_put(opp);
  820. /* Drop the reference taken by dev_pm_opp_add() */
  821. dev_pm_opp_put_opp_table(opp_table);
  822. } else {
  823. dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
  824. __func__, freq);
  825. }
  826. /* Drop the reference taken by _find_opp_table() */
  827. dev_pm_opp_put_opp_table(opp_table);
  828. }
  829. EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
  830. struct dev_pm_opp *_opp_allocate(struct opp_table *table)
  831. {
  832. struct dev_pm_opp *opp;
  833. int count, supply_size;
  834. /* Allocate space for at least one supply */
  835. count = table->regulator_count ? table->regulator_count : 1;
  836. supply_size = sizeof(*opp->supplies) * count;
  837. /* allocate new OPP node and supplies structures */
  838. opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
  839. if (!opp)
  840. return NULL;
  841. /* Put the supplies at the end of the OPP structure as an empty array */
  842. opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
  843. INIT_LIST_HEAD(&opp->node);
  844. return opp;
  845. }
  846. static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
  847. struct opp_table *opp_table)
  848. {
  849. struct regulator *reg;
  850. int i;
  851. for (i = 0; i < opp_table->regulator_count; i++) {
  852. reg = opp_table->regulators[i];
  853. if (!regulator_is_supported_voltage(reg,
  854. opp->supplies[i].u_volt_min,
  855. opp->supplies[i].u_volt_max)) {
  856. pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
  857. __func__, opp->supplies[i].u_volt_min,
  858. opp->supplies[i].u_volt_max);
  859. return false;
  860. }
  861. }
  862. return true;
  863. }
  864. static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
  865. struct opp_table *opp_table,
  866. struct list_head **head)
  867. {
  868. struct dev_pm_opp *opp;
  869. /*
  870. * Insert new OPP in order of increasing frequency and discard if
  871. * already present.
  872. *
  873. * Need to use &opp_table->opp_list in the condition part of the 'for'
  874. * loop, don't replace it with head otherwise it will become an infinite
  875. * loop.
  876. */
  877. list_for_each_entry(opp, &opp_table->opp_list, node) {
  878. if (new_opp->rate > opp->rate) {
  879. *head = &opp->node;
  880. continue;
  881. }
  882. if (new_opp->rate < opp->rate)
  883. return 0;
  884. /* Duplicate OPPs */
  885. dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
  886. __func__, opp->rate, opp->supplies[0].u_volt,
  887. opp->available, new_opp->rate,
  888. new_opp->supplies[0].u_volt, new_opp->available);
  889. /* Should we compare voltages for all regulators here ? */
  890. return opp->available &&
  891. new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
  892. }
  893. return 0;
  894. }
  895. /*
  896. * Returns:
  897. * 0: On success. And appropriate error message for duplicate OPPs.
  898. * -EBUSY: For OPP with same freq/volt and is available. The callers of
  899. * _opp_add() must return 0 if they receive -EBUSY from it. This is to make
  900. * sure we don't print error messages unnecessarily if different parts of
  901. * kernel try to initialize the OPP table.
  902. * -EEXIST: For OPP with same freq but different volt or is unavailable. This
  903. * should be considered an error by the callers of _opp_add().
  904. */
  905. int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
  906. struct opp_table *opp_table, bool rate_not_available)
  907. {
  908. struct list_head *head;
  909. int ret;
  910. mutex_lock(&opp_table->lock);
  911. head = &opp_table->opp_list;
  912. if (likely(!rate_not_available)) {
  913. ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
  914. if (ret) {
  915. mutex_unlock(&opp_table->lock);
  916. return ret;
  917. }
  918. }
  919. list_add(&new_opp->node, head);
  920. mutex_unlock(&opp_table->lock);
  921. new_opp->opp_table = opp_table;
  922. kref_init(&new_opp->kref);
  923. ret = opp_debug_create_one(new_opp, opp_table);
  924. if (ret)
  925. dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
  926. __func__, ret);
  927. if (!_opp_supported_by_regulators(new_opp, opp_table)) {
  928. new_opp->available = false;
  929. dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
  930. __func__, new_opp->rate);
  931. }
  932. return 0;
  933. }
  934. /**
  935. * _opp_add_v1() - Allocate an OPP based on v1 bindings.
  936. * @opp_table: OPP table
  937. * @dev: device for which we do this operation
  938. * @freq: Frequency in Hz for this OPP
  939. * @u_volt: Voltage in uVolts for this OPP
  940. * @dynamic: Dynamically added OPPs.
  941. *
  942. * This function adds an opp definition to the opp table and returns status.
  943. * The opp is made available by default and it can be controlled using
  944. * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
  945. *
  946. * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
  947. * and freed by dev_pm_opp_of_remove_table.
  948. *
  949. * Return:
  950. * 0 On success OR
  951. * Duplicate OPPs (both freq and volt are same) and opp->available
  952. * -EEXIST Freq are same and volt are different OR
  953. * Duplicate OPPs (both freq and volt are same) and !opp->available
  954. * -ENOMEM Memory allocation failure
  955. */
  956. int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
  957. unsigned long freq, long u_volt, bool dynamic)
  958. {
  959. struct dev_pm_opp *new_opp;
  960. unsigned long tol;
  961. int ret;
  962. new_opp = _opp_allocate(opp_table);
  963. if (!new_opp)
  964. return -ENOMEM;
  965. /* populate the opp table */
  966. new_opp->rate = freq;
  967. tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
  968. new_opp->supplies[0].u_volt = u_volt;
  969. new_opp->supplies[0].u_volt_min = u_volt - tol;
  970. new_opp->supplies[0].u_volt_max = u_volt + tol;
  971. new_opp->available = true;
  972. new_opp->dynamic = dynamic;
  973. ret = _opp_add(dev, new_opp, opp_table, false);
  974. if (ret) {
  975. /* Don't return error for duplicate OPPs */
  976. if (ret == -EBUSY)
  977. ret = 0;
  978. goto free_opp;
  979. }
  980. /*
  981. * Notify the changes in the availability of the operable
  982. * frequency/voltage list.
  983. */
  984. blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
  985. return 0;
  986. free_opp:
  987. _opp_free(new_opp);
  988. return ret;
  989. }
  990. /**
  991. * dev_pm_opp_set_supported_hw() - Set supported platforms
  992. * @dev: Device for which supported-hw has to be set.
  993. * @versions: Array of hierarchy of versions to match.
  994. * @count: Number of elements in the array.
  995. *
  996. * This is required only for the V2 bindings, and it enables a platform to
  997. * specify the hierarchy of versions it supports. OPP layer will then enable
  998. * OPPs, which are available for those versions, based on its 'opp-supported-hw'
  999. * property.
  1000. */
  1001. struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
  1002. const u32 *versions, unsigned int count)
  1003. {
  1004. struct opp_table *opp_table;
  1005. opp_table = dev_pm_opp_get_opp_table(dev);
  1006. if (!opp_table)
  1007. return ERR_PTR(-ENOMEM);
  1008. /* Make sure there are no concurrent readers while updating opp_table */
  1009. WARN_ON(!list_empty(&opp_table->opp_list));
  1010. /* Another CPU that shares the OPP table has set the property ? */
  1011. if (opp_table->supported_hw)
  1012. return opp_table;
  1013. opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
  1014. GFP_KERNEL);
  1015. if (!opp_table->supported_hw) {
  1016. dev_pm_opp_put_opp_table(opp_table);
  1017. return ERR_PTR(-ENOMEM);
  1018. }
  1019. opp_table->supported_hw_count = count;
  1020. return opp_table;
  1021. }
  1022. EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
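/*
 * Editor's note: illustrative sketch, not part of the OPP core. A platform
 * driver calls this before the OPP table is populated so that only OPPs whose
 * 'opp-supported-hw' values match are enabled; the version numbers below are
 * hypothetical.
 */
static struct opp_table *example_set_hw(struct device *dev)
{
	const u32 versions[] = { 0x3, 0x2 };	/* e.g. cut and substrate revision */

	return dev_pm_opp_set_supported_hw(dev, versions, ARRAY_SIZE(versions));
}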
  1023. /**
  1024. * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
  1025. * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
  1026. *
  1027. * This is required only for the V2 bindings, and is called for a matching
  1028. * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
  1029. * will not be freed.
  1030. */
  1031. void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
  1032. {
  1033. /* Make sure there are no concurrent readers while updating opp_table */
  1034. WARN_ON(!list_empty(&opp_table->opp_list));
  1035. kfree(opp_table->supported_hw);
  1036. opp_table->supported_hw = NULL;
  1037. opp_table->supported_hw_count = 0;
  1038. dev_pm_opp_put_opp_table(opp_table);
  1039. }
  1040. EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
  1041. /**
  1042. * dev_pm_opp_set_prop_name() - Set prop-extn name
  1043. * @dev: Device for which the prop-name has to be set.
  1044. * @name: name to postfix to properties.
  1045. *
  1046. * This is required only for the V2 bindings, and it enables a platform to
  1047. * specify the extn to be used for certain property names. The properties to
  1048. * which the extension will apply are opp-microvolt and opp-microamp. OPP core
  1049. * should postfix the property name with -<name> while looking for them.
  1050. */
  1051. struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
  1052. {
  1053. struct opp_table *opp_table;
  1054. opp_table = dev_pm_opp_get_opp_table(dev);
  1055. if (!opp_table)
  1056. return ERR_PTR(-ENOMEM);
  1057. /* Make sure there are no concurrent readers while updating opp_table */
  1058. WARN_ON(!list_empty(&opp_table->opp_list));
  1059. /* Another CPU that shares the OPP table has set the property ? */
  1060. if (opp_table->prop_name)
  1061. return opp_table;
  1062. opp_table->prop_name = kstrdup(name, GFP_KERNEL);
  1063. if (!opp_table->prop_name) {
  1064. dev_pm_opp_put_opp_table(opp_table);
  1065. return ERR_PTR(-ENOMEM);
  1066. }
  1067. return opp_table;
  1068. }
  1069. EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
  1070. /**
  1071. * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
  1072. * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
  1073. *
  1074. * This is required only for the V2 bindings, and is called for a matching
  1075. * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
  1076. * will not be freed.
  1077. */
  1078. void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
  1079. {
  1080. /* Make sure there are no concurrent readers while updating opp_table */
  1081. WARN_ON(!list_empty(&opp_table->opp_list));
  1082. kfree(opp_table->prop_name);
  1083. opp_table->prop_name = NULL;
  1084. dev_pm_opp_put_opp_table(opp_table);
  1085. }
  1086. EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
  1087. static int _allocate_set_opp_data(struct opp_table *opp_table)
  1088. {
  1089. struct dev_pm_set_opp_data *data;
  1090. int len, count = opp_table->regulator_count;
  1091. if (WARN_ON(!count))
  1092. return -EINVAL;
  1093. /* space for set_opp_data */
  1094. len = sizeof(*data);
  1095. /* space for old_opp.supplies and new_opp.supplies */
  1096. len += 2 * sizeof(struct dev_pm_opp_supply) * count;
  1097. data = kzalloc(len, GFP_KERNEL);
  1098. if (!data)
  1099. return -ENOMEM;
  1100. data->old_opp.supplies = (void *)(data + 1);
  1101. data->new_opp.supplies = data->old_opp.supplies + count;
  1102. opp_table->set_opp_data = data;
  1103. return 0;
  1104. }
  1105. static void _free_set_opp_data(struct opp_table *opp_table)
  1106. {
  1107. kfree(opp_table->set_opp_data);
  1108. opp_table->set_opp_data = NULL;
  1109. }
  1110. /**
  1111. * dev_pm_opp_set_regulators() - Set regulator names for the device
  1112. * @dev: Device for which regulator name is being set.
  1113. * @names: Array of pointers to the names of the regulator.
  1114. * @count: Number of regulators.
  1115. *
  1116. * In order to support OPP switching, OPP layer needs to know the name of the
  1117. * device's regulators, as the core would be required to switch voltages as
  1118. * well.
  1119. *
  1120. * This must be called before any OPPs are initialized for the device.
  1121. */
  1122. struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
  1123. const char * const names[],
  1124. unsigned int count)
  1125. {
  1126. struct opp_table *opp_table;
  1127. struct regulator *reg;
  1128. int ret, i;
  1129. opp_table = dev_pm_opp_get_opp_table(dev);
  1130. if (!opp_table)
  1131. return ERR_PTR(-ENOMEM);
  1132. /* This should be called before OPPs are initialized */
  1133. if (WARN_ON(!list_empty(&opp_table->opp_list))) {
  1134. ret = -EBUSY;
  1135. goto err;
  1136. }
  1137. /* Another CPU that shares the OPP table has set the regulators ? */
  1138. if (opp_table->regulators)
  1139. return opp_table;
  1140. opp_table->regulators = kmalloc_array(count,
  1141. sizeof(*opp_table->regulators),
  1142. GFP_KERNEL);
  1143. if (!opp_table->regulators) {
  1144. ret = -ENOMEM;
  1145. goto err;
  1146. }
  1147. for (i = 0; i < count; i++) {
  1148. reg = regulator_get_optional(dev, names[i]);
  1149. if (IS_ERR(reg)) {
  1150. ret = PTR_ERR(reg);
  1151. if (ret != -EPROBE_DEFER)
  1152. dev_err(dev, "%s: no regulator (%s) found: %d\n",
  1153. __func__, names[i], ret);
  1154. goto free_regulators;
  1155. }
  1156. opp_table->regulators[i] = reg;
  1157. }
  1158. opp_table->regulator_count = count;
  1159. /* Allocate block only once to pass to set_opp() routines */
  1160. ret = _allocate_set_opp_data(opp_table);
  1161. if (ret)
  1162. goto free_regulators;
  1163. return opp_table;
  1164. free_regulators:
  1165. while (i != 0)
  1166. regulator_put(opp_table->regulators[--i]);
  1167. kfree(opp_table->regulators);
  1168. opp_table->regulators = NULL;
  1169. opp_table->regulator_count = 0;
  1170. err:
  1171. dev_pm_opp_put_opp_table(opp_table);
  1172. return ERR_PTR(ret);
  1173. }
  1174. EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
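/*
 * Editor's note: illustrative sketch, not part of the OPP core. As required by
 * the kerneldoc above, the regulators are registered before any OPPs are
 * added; the supply names are hypothetical.
 */
static struct opp_table *example_set_regulators(struct device *dev)
{
	static const char * const names[] = { "vdd", "vddio" };

	return dev_pm_opp_set_regulators(dev, names, ARRAY_SIZE(names));
}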
  1175. /**
  1176. * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
  1177. * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
  1178. */
  1179. void dev_pm_opp_put_regulators(struct opp_table *opp_table)
  1180. {
  1181. int i;
  1182. if (!opp_table->regulators)
  1183. goto put_opp_table;
  1184. /* Make sure there are no concurrent readers while updating opp_table */
  1185. WARN_ON(!list_empty(&opp_table->opp_list));
  1186. for (i = opp_table->regulator_count - 1; i >= 0; i--)
  1187. regulator_put(opp_table->regulators[i]);
  1188. _free_set_opp_data(opp_table);
  1189. kfree(opp_table->regulators);
  1190. opp_table->regulators = NULL;
  1191. opp_table->regulator_count = 0;
  1192. put_opp_table:
  1193. dev_pm_opp_put_opp_table(opp_table);
  1194. }
  1195. EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
  1196. /**
  1197. * dev_pm_opp_set_clkname() - Set clk name for the device
  1198. * @dev: Device for which clk name is being set.
  1199. * @name: Clk name.
  1200. *
  1201. * In order to support OPP switching, OPP layer needs to get pointer to the
  1202. * clock for the device. Simple cases work fine without using this routine (i.e.
  1203. * by passing connection-id as NULL), but for a device with multiple clocks
  1204. * available, the OPP core needs to know the exact name of the clk to use.
  1205. *
  1206. * This must be called before any OPPs are initialized for the device.
  1207. */
  1208. struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
  1209. {
  1210. struct opp_table *opp_table;
  1211. int ret;
  1212. opp_table = dev_pm_opp_get_opp_table(dev);
  1213. if (!opp_table)
  1214. return ERR_PTR(-ENOMEM);
  1215. /* This should be called before OPPs are initialized */
  1216. if (WARN_ON(!list_empty(&opp_table->opp_list))) {
  1217. ret = -EBUSY;
  1218. goto err;
  1219. }
  1220. /* Already have default clk set, free it */
  1221. if (!IS_ERR(opp_table->clk))
  1222. clk_put(opp_table->clk);
  1223. /* Find clk for the device */
  1224. opp_table->clk = clk_get(dev, name);
  1225. if (IS_ERR(opp_table->clk)) {
  1226. ret = PTR_ERR(opp_table->clk);
  1227. if (ret != -EPROBE_DEFER) {
  1228. dev_err(dev, "%s: Couldn't find clock: %d\n", __func__,
  1229. ret);
  1230. }
  1231. goto err;
  1232. }
  1233. return opp_table;
  1234. err:
  1235. dev_pm_opp_put_opp_table(opp_table);
  1236. return ERR_PTR(ret);
  1237. }
  1238. EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
  1239. /**
  1240. * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
  1241. * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
  1242. */
  1243. void dev_pm_opp_put_clkname(struct opp_table *opp_table)
  1244. {
  1245. /* Make sure there are no concurrent readers while updating opp_table */
  1246. WARN_ON(!list_empty(&opp_table->opp_list));
  1247. clk_put(opp_table->clk);
  1248. opp_table->clk = ERR_PTR(-EINVAL);
  1249. dev_pm_opp_put_opp_table(opp_table);
  1250. }
  1251. EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
  1252. /**
  1253. * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
  1254. * @dev: Device for which the helper is getting registered.
  1255. * @set_opp: Custom set OPP helper.
  1256. *
  1257. * This is useful to support complex platforms (like platforms with multiple
  1258. * regulators per device), instead of the generic OPP set rate helper.
  1259. *
  1260. * This must be called before any OPPs are initialized for the device.
  1261. */
  1262. struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
  1263. int (*set_opp)(struct dev_pm_set_opp_data *data))
  1264. {
  1265. struct opp_table *opp_table;
  1266. if (!set_opp)
  1267. return ERR_PTR(-EINVAL);
  1268. opp_table = dev_pm_opp_get_opp_table(dev);
  1269. if (!opp_table)
  1270. return ERR_PTR(-ENOMEM);
  1271. /* This should be called before OPPs are initialized */
  1272. if (WARN_ON(!list_empty(&opp_table->opp_list))) {
  1273. dev_pm_opp_put_opp_table(opp_table);
  1274. return ERR_PTR(-EBUSY);
  1275. }
  1276. /* Another CPU that shares the OPP table has set the helper ? */
  1277. if (!opp_table->set_opp)
  1278. opp_table->set_opp = set_opp;
  1279. return opp_table;
  1280. }
  1281. EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
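/*
 * Editor's note: illustrative sketch, not part of the OPP core. It sketches
 * the shape of a platform-specific set_opp() callback using the fields that
 * dev_pm_opp_set_rate() fills in above; the sequencing shown (first supply,
 * then clock) is hypothetical platform policy.
 */
static int example_set_opp(struct dev_pm_set_opp_data *data)
{
	struct dev_pm_opp_supply *sup = data->new_opp.supplies;
	int ret;

	ret = regulator_set_voltage_triplet(data->regulators[0],
					    sup[0].u_volt_min, sup[0].u_volt,
					    sup[0].u_volt_max);
	if (ret)
		return ret;

	return clk_set_rate(data->clk, data->new_opp.rate);
}
/*
 * A platform would then register it with:
 *	dev_pm_opp_register_set_opp_helper(dev, example_set_opp);
 */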
  1282. /**
  1283. * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
  1284. * set_opp helper
  1285. * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
  1286. *
  1287. * Release resources blocked for platform specific set_opp helper.
  1288. */
  1289. void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
  1290. {
  1291. /* Make sure there are no concurrent readers while updating opp_table */
  1292. WARN_ON(!list_empty(&opp_table->opp_list));
  1293. opp_table->set_opp = NULL;
  1294. dev_pm_opp_put_opp_table(opp_table);
  1295. }
  1296. EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
  1297. /**
  1298. * dev_pm_opp_add() - Add an OPP to the device's OPP table
  1299. * @dev: device for which we do this operation
  1300. * @freq: Frequency in Hz for this OPP
  1301. * @u_volt: Voltage in uVolts for this OPP
  1302. *
  1303. * This function adds an opp definition to the opp table and returns status.
  1304. * The opp is made available by default and it can be controlled using
  1305. * dev_pm_opp_enable/disable functions.
  1306. *
  1307. * Return:
  1308. * 0 On success OR
  1309. * Duplicate OPPs (both freq and volt are same) and opp->available
  1310. * -EEXIST Freq are same and volt are different OR
  1311. * Duplicate OPPs (both freq and volt are same) and !opp->available
  1312. * -ENOMEM Memory allocation failure
  1313. */
  1314. int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
  1315. {
  1316. struct opp_table *opp_table;
  1317. int ret;
  1318. opp_table = dev_pm_opp_get_opp_table(dev);
  1319. if (!opp_table)
  1320. return -ENOMEM;
  1321. ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
  1322. if (ret)
  1323. dev_pm_opp_put_opp_table(opp_table);
  1324. return ret;
  1325. }
  1326. EXPORT_SYMBOL_GPL(dev_pm_opp_add);
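/*
 * Editor's note: illustrative sketch, not part of the OPP core. Dynamic OPPs
 * can be registered one by one from platform code; the frequencies and
 * voltages below are hypothetical.
 */
static int example_populate_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 500000000, 950000);	/* 500 MHz @ 0.95 V */
	if (ret)
		return ret;

	return dev_pm_opp_add(dev, 800000000, 1100000);	/* 800 MHz @ 1.10 V */
}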
  1327. /**
  1328. * _opp_set_availability() - helper to set the availability of an opp
  1329. * @dev: device for which we do this operation
  1330. * @freq: OPP frequency to modify availability
  1331. * @availability_req: availability status requested for this opp
  1332. *
  1333. * Set the availability of an OPP, opp_{enable,disable} share a common logic
  1334. * which is isolated here.
  1335. *
  1336. * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
  1337. * copy operation, returns 0 if no modification was done OR modification was
  1338. * successful.
  1339. */
  1340. static int _opp_set_availability(struct device *dev, unsigned long freq,
  1341. bool availability_req)
  1342. {
  1343. struct opp_table *opp_table;
  1344. struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
  1345. int r = 0;
  1346. /* Find the opp_table */
  1347. opp_table = _find_opp_table(dev);
  1348. if (IS_ERR(opp_table)) {
  1349. r = PTR_ERR(opp_table);
  1350. dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
  1351. return r;
  1352. }
  1353. mutex_lock(&opp_table->lock);
  1354. /* Do we have the frequency? */
  1355. list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
  1356. if (tmp_opp->rate == freq) {
  1357. opp = tmp_opp;
  1358. break;
  1359. }
  1360. }
  1361. if (IS_ERR(opp)) {
  1362. r = PTR_ERR(opp);
  1363. goto unlock;
  1364. }
  1365. /* Is update really needed? */
  1366. if (opp->available == availability_req)
  1367. goto unlock;
  1368. opp->available = availability_req;
  1369. dev_pm_opp_get(opp);
  1370. mutex_unlock(&opp_table->lock);
  1371. /* Notify the change of the OPP availability */
  1372. if (availability_req)
  1373. blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
  1374. opp);
  1375. else
  1376. blocking_notifier_call_chain(&opp_table->head,
  1377. OPP_EVENT_DISABLE, opp);
  1378. dev_pm_opp_put(opp);
  1379. goto put_table;
  1380. unlock:
  1381. mutex_unlock(&opp_table->lock);
  1382. put_table:
  1383. dev_pm_opp_put_opp_table(opp_table);
  1384. return r;
  1385. }
  1386. /**
  1387. * dev_pm_opp_enable() - Enable a specific OPP
  1388. * @dev: device for which we do this operation
  1389. * @freq: OPP frequency to enable
  1390. *
  1391. * Enables a provided opp. If the operation is valid, this returns 0, else the
  1392. * corresponding error value. It is meant to be used by users to make an OPP
  1393. * available again after it was temporarily made unavailable with dev_pm_opp_disable.
  1394. *
  1395. * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
  1396. * copy operation, returns 0 if no modification was done OR modification was
  1397. * successful.
  1398. */
  1399. int dev_pm_opp_enable(struct device *dev, unsigned long freq)
  1400. {
  1401. return _opp_set_availability(dev, freq, true);
  1402. }
  1403. EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
  1404. /**
  1405. * dev_pm_opp_disable() - Disable a specific OPP
  1406. * @dev: device for which we do this operation
  1407. * @freq: OPP frequency to disable
  1408. *
  1409. * Disables a provided opp. If the operation is valid, this returns
  1410. * 0, else the corresponding error value. It is meant to be a temporary
  1411. * control by users to make this OPP not available until the circumstances are
  1412. * right to make it available again (with a call to dev_pm_opp_enable).
  1413. *
  1414. * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
  1415. * copy operation, returns 0 if no modification was done OR modification was
  1416. * successful.
  1417. */
  1418. int dev_pm_opp_disable(struct device *dev, unsigned long freq)
  1419. {
  1420. return _opp_set_availability(dev, freq, false);
  1421. }
  1422. EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
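/*
 * Editor's note: illustrative sketch, not part of the OPP core. A driver might
 * temporarily disable its boost OPP under a (hypothetical) thermal constraint
 * and re-enable it once conditions allow.
 */
static int example_thermal_cap(struct device *dev, unsigned long boost_hz,
			       bool too_hot)
{
	return too_hot ? dev_pm_opp_disable(dev, boost_hz) :
			 dev_pm_opp_enable(dev, boost_hz);
}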
  1423. /**
  1424. * dev_pm_opp_register_notifier() - Register OPP notifier for the device
  1425. * @dev: Device for which notifier needs to be registered
  1426. * @nb: Notifier block to be registered
  1427. *
  1428. * Return: 0 on success or a negative error value.
  1429. */
  1430. int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
  1431. {
  1432. struct opp_table *opp_table;
  1433. int ret;
  1434. opp_table = _find_opp_table(dev);
  1435. if (IS_ERR(opp_table))
  1436. return PTR_ERR(opp_table);
  1437. ret = blocking_notifier_chain_register(&opp_table->head, nb);
  1438. dev_pm_opp_put_opp_table(opp_table);
  1439. return ret;
  1440. }
  1441. EXPORT_SYMBOL(dev_pm_opp_register_notifier);
  1442. /**
  1443. * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
  1444. * @dev: Device for which notifier needs to be unregistered
  1445. * @nb: Notifier block to be unregistered
  1446. *
  1447. * Return: 0 on success or a negative error value.
  1448. */
  1449. int dev_pm_opp_unregister_notifier(struct device *dev,
  1450. struct notifier_block *nb)
  1451. {
  1452. struct opp_table *opp_table;
  1453. int ret;
  1454. opp_table = _find_opp_table(dev);
  1455. if (IS_ERR(opp_table))
  1456. return PTR_ERR(opp_table);
  1457. ret = blocking_notifier_chain_unregister(&opp_table->head, nb);
  1458. dev_pm_opp_put_opp_table(opp_table);
  1459. return ret;
  1460. }
  1461. EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
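/*
 * Editor's note: illustrative sketch, not part of the OPP core. The notifier
 * data pointer is the affected struct dev_pm_opp, as passed to
 * blocking_notifier_call_chain() elsewhere in this file; the callback below is
 * hypothetical.
 */
static int example_opp_notify(struct notifier_block *nb, unsigned long event,
			      void *data)
{
	struct dev_pm_opp *opp = data;

	switch (event) {
	case OPP_EVENT_ADD:
	case OPP_EVENT_ENABLE:
		pr_debug("OPP at %lu Hz added/enabled\n",
			 dev_pm_opp_get_freq(opp));
		break;
	case OPP_EVENT_REMOVE:
	case OPP_EVENT_DISABLE:
		pr_debug("OPP event %lu\n", event);
		break;
	}

	return NOTIFY_OK;
}
/*
 * Registered with dev_pm_opp_register_notifier(dev, &nb), where
 * nb.notifier_call = example_opp_notify.
 */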
  1462. /*
  1463. * Free OPPs created using static entries present in DT.
  1464. */
  1465. void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev)
  1466. {
  1467. /* Protect dev_list */
  1468. mutex_lock(&opp_table->lock);
  1469. /* Find if opp_table manages a single device */
  1470. if (list_is_singular(&opp_table->dev_list)) {
  1471. /* Free static OPPs */
  1472. _put_opp_list_kref(opp_table);
  1473. /*
  1474. * The OPP table is getting removed, drop the performance state
  1475. * constraints.
  1476. */
  1477. if (opp_table->genpd_performance_state)
  1478. dev_pm_genpd_set_performance_state(dev, 0);
  1479. } else {
  1480. _put_opp_list_kref(opp_table);
  1481. _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
  1482. }
  1483. mutex_unlock(&opp_table->lock);
  1484. }
  1485. void _dev_pm_opp_find_and_remove_table(struct device *dev)
  1486. {
  1487. struct opp_table *opp_table;
  1488. /* Check for existing table for 'dev' */
  1489. opp_table = _find_opp_table(dev);
  1490. if (IS_ERR(opp_table)) {
  1491. int error = PTR_ERR(opp_table);
  1492. if (error != -ENODEV)
  1493. WARN(1, "%s: opp_table: %d\n",
  1494. IS_ERR_OR_NULL(dev) ?
  1495. "Invalid device" : dev_name(dev),
  1496. error);
  1497. return;
  1498. }
  1499. _dev_pm_opp_remove_table(opp_table, dev);
  1500. dev_pm_opp_put_opp_table(opp_table);
  1501. }
  1502. /**
  1503. * dev_pm_opp_remove_table() - Free all OPPs associated with the device
  1504. * @dev: device pointer used to lookup OPP table.
  1505. *
  1506. * Free both OPPs created using static entries present in DT and the
  1507. * dynamically added entries.
  1508. */
  1509. void dev_pm_opp_remove_table(struct device *dev)
  1510. {
  1511. _dev_pm_opp_find_and_remove_table(dev);
  1512. }
  1513. EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);