core.c 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880
  1. /*
  2. * Generic OPP Interface
  3. *
  4. * Copyright (C) 2009-2010 Texas Instruments Incorporated.
  5. * Nishanth Menon
  6. * Romit Dasgupta
  7. * Kevin Hilman
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/clk.h>
  15. #include <linux/errno.h>
  16. #include <linux/err.h>
  17. #include <linux/slab.h>
  18. #include <linux/device.h>
  19. #include <linux/export.h>
  20. #include <linux/pm_domain.h>
  21. #include <linux/regulator/consumer.h>
  22. #include "opp.h"
/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);

/* Forward declaration; defined below alongside the other kref helpers. */
static void dev_pm_opp_get(struct dev_pm_opp *opp);
  32. static struct opp_device *_find_opp_dev(const struct device *dev,
  33. struct opp_table *opp_table)
  34. {
  35. struct opp_device *opp_dev;
  36. list_for_each_entry(opp_dev, &opp_table->dev_list, node)
  37. if (opp_dev->dev == dev)
  38. return opp_dev;
  39. return NULL;
  40. }
  41. static struct opp_table *_find_opp_table_unlocked(struct device *dev)
  42. {
  43. struct opp_table *opp_table;
  44. list_for_each_entry(opp_table, &opp_tables, node) {
  45. if (_find_opp_dev(dev, opp_table)) {
  46. _get_opp_table_kref(opp_table);
  47. return opp_table;
  48. }
  49. }
  50. return ERR_PTR(-ENODEV);
  51. }
  52. /**
  53. * _find_opp_table() - find opp_table struct using device pointer
  54. * @dev: device pointer used to lookup OPP table
  55. *
  56. * Search OPP table for one containing matching device.
  57. *
  58. * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
  59. * -EINVAL based on type of error.
  60. *
  61. * The callers must call dev_pm_opp_put_opp_table() after the table is used.
  62. */
  63. struct opp_table *_find_opp_table(struct device *dev)
  64. {
  65. struct opp_table *opp_table;
  66. if (IS_ERR_OR_NULL(dev)) {
  67. pr_err("%s: Invalid parameters\n", __func__);
  68. return ERR_PTR(-EINVAL);
  69. }
  70. mutex_lock(&opp_table_lock);
  71. opp_table = _find_opp_table_unlocked(dev);
  72. mutex_unlock(&opp_table_lock);
  73. return opp_table;
  74. }
  75. /**
  76. * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
  77. * @opp: opp for which voltage has to be returned for
  78. *
  79. * Return: voltage in micro volt corresponding to the opp, else
  80. * return 0
  81. *
  82. * This is useful only for devices with single power supply.
  83. */
  84. unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
  85. {
  86. if (IS_ERR_OR_NULL(opp)) {
  87. pr_err("%s: Invalid parameters\n", __func__);
  88. return 0;
  89. }
  90. return opp->supplies[0].u_volt;
  91. }
  92. EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
  93. /**
  94. * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  95. * @opp: opp for which frequency has to be returned for
  96. *
  97. * Return: frequency in hertz corresponding to the opp, else
  98. * return 0
  99. */
  100. unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
  101. {
  102. if (IS_ERR_OR_NULL(opp) || !opp->available) {
  103. pr_err("%s: Invalid parameters\n", __func__);
  104. return 0;
  105. }
  106. return opp->rate;
  107. }
  108. EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
  109. /**
  110. * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
  111. * @opp: opp for which turbo mode is being verified
  112. *
  113. * Turbo OPPs are not for normal use, and can be enabled (under certain
  114. * conditions) for short duration of times to finish high throughput work
  115. * quickly. Running on them for longer times may overheat the chip.
  116. *
  117. * Return: true if opp is turbo opp, else false.
  118. */
  119. bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
  120. {
  121. if (IS_ERR_OR_NULL(opp) || !opp->available) {
  122. pr_err("%s: Invalid parameters\n", __func__);
  123. return false;
  124. }
  125. return opp->turbo;
  126. }
  127. EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
  128. /**
  129. * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
  130. * @dev: device for which we do this operation
  131. *
  132. * Return: This function returns the max clock latency in nanoseconds.
  133. */
  134. unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
  135. {
  136. struct opp_table *opp_table;
  137. unsigned long clock_latency_ns;
  138. opp_table = _find_opp_table(dev);
  139. if (IS_ERR(opp_table))
  140. return 0;
  141. clock_latency_ns = opp_table->clock_latency_ns_max;
  142. dev_pm_opp_put_opp_table(opp_table);
  143. return clock_latency_ns;
  144. }
  145. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Sums, over every regulator of the device, the worst-case time needed to
 * slew between the lowest and highest voltages used by any available OPP.
 *
 * Return: This function returns the max voltage latency in nanoseconds,
 * or 0 if the device has no OPP table or no regulators.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	int ret, i, count;
	/* Per-regulator voltage span across all available OPPs */
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	count = opp_table->regulator_count;

	/* Regulator may not be required for the device */
	if (!count)
		goto put_opp_table;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto put_opp_table;

	/* Hold the table lock while walking the OPP list */
	mutex_lock(&opp_table->lock);

	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;

		/* Widen [min, max] with every available OPP's supply range */
		list_for_each_entry(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
	}

	mutex_unlock(&opp_table->lock);

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = opp_table->regulators[i];
		/* regulator_set_voltage_time() reports microseconds */
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

	kfree(uV);

put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
  203. /**
  204. * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
  205. * nanoseconds
  206. * @dev: device for which we do this operation
  207. *
  208. * Return: This function returns the max transition latency, in nanoseconds, to
  209. * switch from one OPP to other.
  210. */
  211. unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
  212. {
  213. return dev_pm_opp_get_max_volt_latency(dev) +
  214. dev_pm_opp_get_max_clock_latency(dev);
  215. }
  216. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
  217. /**
  218. * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
  219. * @dev: device for which we do this operation
  220. *
  221. * Return: This function returns the frequency of the OPP marked as suspend_opp
  222. * if one is available, else returns 0;
  223. */
  224. unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
  225. {
  226. struct opp_table *opp_table;
  227. unsigned long freq = 0;
  228. opp_table = _find_opp_table(dev);
  229. if (IS_ERR(opp_table))
  230. return 0;
  231. if (opp_table->suspend_opp && opp_table->suspend_opp->available)
  232. freq = dev_pm_opp_get_freq(opp_table->suspend_opp);
  233. dev_pm_opp_put_opp_table(opp_table);
  234. return freq;
  235. }
  236. EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
  237. /**
  238. * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
  239. * @dev: device for which we do this operation
  240. *
  241. * Return: This function returns the number of available opps if there are any,
  242. * else returns 0 if none or the corresponding error value.
  243. */
  244. int dev_pm_opp_get_opp_count(struct device *dev)
  245. {
  246. struct opp_table *opp_table;
  247. struct dev_pm_opp *temp_opp;
  248. int count = 0;
  249. opp_table = _find_opp_table(dev);
  250. if (IS_ERR(opp_table)) {
  251. count = PTR_ERR(opp_table);
  252. dev_dbg(dev, "%s: OPP table not found (%d)\n",
  253. __func__, count);
  254. return count;
  255. }
  256. mutex_lock(&opp_table->lock);
  257. list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
  258. if (temp_opp->available)
  259. count++;
  260. }
  261. mutex_unlock(&opp_table->lock);
  262. dev_pm_opp_put_opp_table(opp_table);
  263. return count;
  264. }
  265. EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
  266. /**
  267. * dev_pm_opp_find_freq_exact() - search for an exact frequency
  268. * @dev: device for which we do this operation
  269. * @freq: frequency to search for
  270. * @available: true/false - match for available opp
  271. *
  272. * Return: Searches for exact match in the opp table and returns pointer to the
  273. * matching opp if found, else returns ERR_PTR in case of error and should
  274. * be handled using IS_ERR. Error return values can be:
  275. * EINVAL: for bad pointer
  276. * ERANGE: no match found for search
  277. * ENODEV: if device not found in list of registered devices
  278. *
  279. * Note: available is a modifier for the search. if available=true, then the
  280. * match is for exact matching frequency and is available in the stored OPP
  281. * table. if false, the match is for exact frequency which is not available.
  282. *
  283. * This provides a mechanism to enable an opp which is not available currently
  284. * or the opposite as well.
  285. *
  286. * The callers are required to call dev_pm_opp_put() for the returned OPP after
  287. * use.
  288. */
  289. struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
  290. unsigned long freq,
  291. bool available)
  292. {
  293. struct opp_table *opp_table;
  294. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  295. opp_table = _find_opp_table(dev);
  296. if (IS_ERR(opp_table)) {
  297. int r = PTR_ERR(opp_table);
  298. dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
  299. return ERR_PTR(r);
  300. }
  301. mutex_lock(&opp_table->lock);
  302. list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
  303. if (temp_opp->available == available &&
  304. temp_opp->rate == freq) {
  305. opp = temp_opp;
  306. /* Increment the reference count of OPP */
  307. dev_pm_opp_get(opp);
  308. break;
  309. }
  310. }
  311. mutex_unlock(&opp_table->lock);
  312. dev_pm_opp_put_opp_table(opp_table);
  313. return opp;
  314. }
  315. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
  316. static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
  317. unsigned long *freq)
  318. {
  319. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  320. mutex_lock(&opp_table->lock);
  321. list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
  322. if (temp_opp->available && temp_opp->rate >= *freq) {
  323. opp = temp_opp;
  324. *freq = opp->rate;
  325. /* Increment the reference count of OPP */
  326. dev_pm_opp_get(opp);
  327. break;
  328. }
  329. }
  330. mutex_unlock(&opp_table->lock);
  331. return opp;
  332. }
  333. /**
  334. * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
  335. * @dev: device for which we do this operation
  336. * @freq: Start frequency
  337. *
  338. * Search for the matching ceil *available* OPP from a starting freq
  339. * for a device.
  340. *
  341. * Return: matching *opp and refreshes *freq accordingly, else returns
  342. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  343. * values can be:
  344. * EINVAL: for bad pointer
  345. * ERANGE: no match found for search
  346. * ENODEV: if device not found in list of registered devices
  347. *
  348. * The callers are required to call dev_pm_opp_put() for the returned OPP after
  349. * use.
  350. */
  351. struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
  352. unsigned long *freq)
  353. {
  354. struct opp_table *opp_table;
  355. struct dev_pm_opp *opp;
  356. if (!dev || !freq) {
  357. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  358. return ERR_PTR(-EINVAL);
  359. }
  360. opp_table = _find_opp_table(dev);
  361. if (IS_ERR(opp_table))
  362. return ERR_CAST(opp_table);
  363. opp = _find_freq_ceil(opp_table, freq);
  364. dev_pm_opp_put_opp_table(opp_table);
  365. return opp;
  366. }
  367. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
  368. /**
  369. * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
  370. * @dev: device for which we do this operation
  371. * @freq: Start frequency
  372. *
  373. * Search for the matching floor *available* OPP from a starting freq
  374. * for a device.
  375. *
  376. * Return: matching *opp and refreshes *freq accordingly, else returns
  377. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  378. * values can be:
  379. * EINVAL: for bad pointer
  380. * ERANGE: no match found for search
  381. * ENODEV: if device not found in list of registered devices
  382. *
  383. * The callers are required to call dev_pm_opp_put() for the returned OPP after
  384. * use.
  385. */
  386. struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
  387. unsigned long *freq)
  388. {
  389. struct opp_table *opp_table;
  390. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  391. if (!dev || !freq) {
  392. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  393. return ERR_PTR(-EINVAL);
  394. }
  395. opp_table = _find_opp_table(dev);
  396. if (IS_ERR(opp_table))
  397. return ERR_CAST(opp_table);
  398. mutex_lock(&opp_table->lock);
  399. list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
  400. if (temp_opp->available) {
  401. /* go to the next node, before choosing prev */
  402. if (temp_opp->rate > *freq)
  403. break;
  404. else
  405. opp = temp_opp;
  406. }
  407. }
  408. /* Increment the reference count of OPP */
  409. if (!IS_ERR(opp))
  410. dev_pm_opp_get(opp);
  411. mutex_unlock(&opp_table->lock);
  412. dev_pm_opp_put_opp_table(opp_table);
  413. if (!IS_ERR(opp))
  414. *freq = opp->rate;
  415. return opp;
  416. }
  417. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
  418. static int _set_opp_voltage(struct device *dev, struct regulator *reg,
  419. struct dev_pm_opp_supply *supply)
  420. {
  421. int ret;
  422. /* Regulator not available for device */
  423. if (IS_ERR(reg)) {
  424. dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
  425. PTR_ERR(reg));
  426. return 0;
  427. }
  428. dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
  429. supply->u_volt_min, supply->u_volt, supply->u_volt_max);
  430. ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
  431. supply->u_volt, supply->u_volt_max);
  432. if (ret)
  433. dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
  434. __func__, supply->u_volt_min, supply->u_volt,
  435. supply->u_volt_max, ret);
  436. return ret;
  437. }
  438. static inline int
  439. _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
  440. unsigned long old_freq, unsigned long freq)
  441. {
  442. int ret;
  443. ret = clk_set_rate(clk, freq);
  444. if (ret) {
  445. dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
  446. ret);
  447. }
  448. return ret;
  449. }
/*
 * Switch both the clock rate and the genpd performance state.
 *
 * Ordering matters: when raising the frequency the performance state is
 * raised first (so the domain can sustain the new rate); when lowering it,
 * the state is dropped only after the clock change. On failure the steps
 * already taken are unwound in reverse order.
 */
static inline int
_generic_set_opp_domain(struct device *dev, struct clk *clk,
			unsigned long old_freq, unsigned long freq,
			unsigned int old_pstate, unsigned int new_pstate)
{
	int ret;

	/* Scaling up? Scale domain performance state before frequency */
	if (freq > old_freq) {
		ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
		if (ret)
			return ret;
	}

	ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
	if (ret)
		goto restore_domain_state;

	/* Scaling down? Scale domain performance state after frequency */
	if (freq < old_freq) {
		ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	/* Best-effort rollback of the clock change */
	if (_generic_set_opp_clk_only(dev, clk, freq, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_domain_state:
	/* Only undo the pstate bump taken on the scale-up path above */
	if (freq > old_freq)
		dev_pm_genpd_set_performance_state(dev, old_pstate);

	return ret;
}
/*
 * Switch the clock rate together with a single regulator supply.
 *
 * Voltage is raised before a frequency increase and lowered after a
 * frequency decrease, so the device is never clocked faster than its
 * current voltage supports. Failures unwind in reverse order; restoring
 * the old voltage is skipped when @old_supply is NULL (current OPP
 * unknown).
 */
static int _generic_set_opp_regulator(const struct opp_table *opp_table,
				      struct device *dev,
				      unsigned long old_freq,
				      unsigned long freq,
				      struct dev_pm_opp_supply *old_supply,
				      struct dev_pm_opp_supply *new_supply)
{
	struct regulator *reg = opp_table->regulators[0];
	int ret;

	/* This function only supports single regulator per device */
	if (WARN_ON(opp_table->regulator_count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	/* Scaling up? Scale voltage before frequency */
	if (freq > old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */
	ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
	if (ret)
		goto restore_voltage;

	/* Scaling down? Scale voltage after frequency */
	if (freq < old_freq) {
		ret = _set_opp_voltage(dev, reg, new_supply);
		if (ret)
			goto restore_freq;
	}

	return 0;

restore_freq:
	if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_freq);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	if (old_supply)
		_set_opp_voltage(dev, reg, old_supply);

	return ret;
}
/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq, old_freq;
	struct dev_pm_opp *old_opp, *opp;
	struct clk *clk;
	int ret, size;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	clk = opp_table->clk;
	if (IS_ERR(clk)) {
		dev_err(dev, "%s: No clock available for the device\n",
			__func__);
		ret = PTR_ERR(clk);
		goto put_opp_table;
	}

	/* Ask the clk framework what rate it would actually use */
	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		ret = 0;
		goto put_opp_table;
	}

	/*
	 * Failing to find the current OPP is not fatal: the switch proceeds,
	 * we just cannot restore the old supplies/pstate on error below.
	 */
	old_opp = _find_freq_ceil(opp_table, &old_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = _find_freq_ceil(opp_table, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		goto put_old_opp;
	}

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
		old_freq, freq);

	/* Only frequency scaling */
	if (!opp_table->regulators) {
		/*
		 * We don't support devices with both regulator and
		 * domain performance-state for now.
		 */
		if (opp_table->genpd_performance_state)
			ret = _generic_set_opp_domain(dev, clk, old_freq, freq,
						      IS_ERR(old_opp) ? 0 : old_opp->pstate,
						      opp->pstate);
		else
			ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
	} else if (!opp_table->set_opp) {
		/* Generic path: one regulator plus the clock */
		ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
						 IS_ERR(old_opp) ? NULL : old_opp->supplies,
						 opp->supplies);
	} else {
		/* Platform-specific helper: hand over everything it may need */
		struct dev_pm_set_opp_data *data;

		data = opp_table->set_opp_data;
		data->regulators = opp_table->regulators;
		data->regulator_count = opp_table->regulator_count;
		data->clk = clk;
		data->dev = dev;

		data->old_opp.rate = old_freq;
		size = sizeof(*opp->supplies) * opp_table->regulator_count;
		/* Zero the old supplies when the current OPP is unknown */
		if (IS_ERR(old_opp))
			memset(data->old_opp.supplies, 0, size);
		else
			memcpy(data->old_opp.supplies, old_opp->supplies, size);

		data->new_opp.rate = freq;
		memcpy(data->new_opp.supplies, opp->supplies, size);

		ret = opp_table->set_opp(data);
	}

	dev_pm_opp_put(opp);
put_old_opp:
	if (!IS_ERR(old_opp))
		dev_pm_opp_put(old_opp);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
/* OPP-dev Helpers */

/*
 * Tear down @opp_dev: drop its debugfs entries, unlink it from
 * @opp_table's device list and free it.
 */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}
  629. struct opp_device *_add_opp_dev(const struct device *dev,
  630. struct opp_table *opp_table)
  631. {
  632. struct opp_device *opp_dev;
  633. int ret;
  634. opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
  635. if (!opp_dev)
  636. return NULL;
  637. /* Initialize opp-dev */
  638. opp_dev->dev = dev;
  639. list_add(&opp_dev->node, &opp_table->dev_list);
  640. /* Create debugfs entries for the opp_table */
  641. ret = opp_debug_register(opp_dev, opp_table);
  642. if (ret)
  643. dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
  644. __func__, ret);
  645. return opp_dev;
  646. }
  647. static struct opp_table *_allocate_opp_table(struct device *dev)
  648. {
  649. struct opp_table *opp_table;
  650. struct opp_device *opp_dev;
  651. int ret;
  652. /*
  653. * Allocate a new OPP table. In the infrequent case where a new
  654. * device is needed to be added, we pay this penalty.
  655. */
  656. opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
  657. if (!opp_table)
  658. return NULL;
  659. INIT_LIST_HEAD(&opp_table->dev_list);
  660. opp_dev = _add_opp_dev(dev, opp_table);
  661. if (!opp_dev) {
  662. kfree(opp_table);
  663. return NULL;
  664. }
  665. _of_init_opp_table(opp_table, dev);
  666. /* Find clk for the device */
  667. opp_table->clk = clk_get(dev, NULL);
  668. if (IS_ERR(opp_table->clk)) {
  669. ret = PTR_ERR(opp_table->clk);
  670. if (ret != -EPROBE_DEFER)
  671. dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
  672. ret);
  673. }
  674. BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
  675. INIT_LIST_HEAD(&opp_table->opp_list);
  676. mutex_init(&opp_table->lock);
  677. kref_init(&opp_table->kref);
  678. /* Secure the device table modification */
  679. list_add(&opp_table->node, &opp_tables);
  680. return opp_table;
  681. }
/* Take a reference on @opp_table; paired with dev_pm_opp_put_opp_table(). */
void _get_opp_table_kref(struct opp_table *opp_table)
{
	kref_get(&opp_table->kref);
}
  686. struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
  687. {
  688. struct opp_table *opp_table;
  689. /* Hold our table modification lock here */
  690. mutex_lock(&opp_table_lock);
  691. opp_table = _find_opp_table_unlocked(dev);
  692. if (!IS_ERR(opp_table))
  693. goto unlock;
  694. opp_table = _allocate_opp_table(dev);
  695. unlock:
  696. mutex_unlock(&opp_table_lock);
  697. return opp_table;
  698. }
  699. EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
/*
 * Final release of an opp_table. Invoked by kref_put_mutex() with
 * opp_table_lock already held, so this function must unlock it before
 * returning.
 */
static void _opp_table_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
	struct opp_device *opp_dev;

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
				   node);

	_remove_opp_dev(opp_dev, opp_table);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&opp_table->dev_list));

	mutex_destroy(&opp_table->lock);
	list_del(&opp_table->node);
	kfree(opp_table);

	/* Drop the lock that kref_put_mutex() took on our behalf */
	mutex_unlock(&opp_table_lock);
}
/*
 * Drop a reference to @opp_table. On the final put, opp_table_lock is
 * acquired and _opp_table_kref_release() frees the table (and releases the
 * lock).
 */
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);
/* Free an OPP obtained from _opp_allocate() that was never added to a table. */
void _opp_free(struct dev_pm_opp *opp)
{
	kfree(opp);
}
/*
 * Final kref release for an OPP.
 *
 * Invoked by kref_put_mutex() with opp->opp_table->lock already held; the
 * lock is released here once the OPP is unlinked and freed, and the
 * reference the OPP held on its table is dropped last.
 */
static void _opp_kref_release(struct kref *kref)
{
	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
	struct opp_table *opp_table = opp->opp_table;

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
	opp_debug_remove_one(opp);
	list_del(&opp->node);
	kfree(opp);

	/* Taken by kref_put_mutex() in dev_pm_opp_put() */
	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);
}
/* Take an additional reference on @opp; paired with dev_pm_opp_put(). */
static void dev_pm_opp_get(struct dev_pm_opp *opp)
{
	kref_get(&opp->kref);
}
/*
 * Drop a reference to @opp. On the final put, the owning table's lock is
 * acquired and _opp_kref_release() removes and frees the OPP.
 */
void dev_pm_opp_put(struct dev_pm_opp *opp)
{
	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);
  751. /**
  752. * dev_pm_opp_remove() - Remove an OPP from OPP table
  753. * @dev: device for which we do this operation
  754. * @freq: OPP to remove with matching 'freq'
  755. *
  756. * This function removes an opp from the opp table.
  757. */
  758. void dev_pm_opp_remove(struct device *dev, unsigned long freq)
  759. {
  760. struct dev_pm_opp *opp;
  761. struct opp_table *opp_table;
  762. bool found = false;
  763. opp_table = _find_opp_table(dev);
  764. if (IS_ERR(opp_table))
  765. return;
  766. mutex_lock(&opp_table->lock);
  767. list_for_each_entry(opp, &opp_table->opp_list, node) {
  768. if (opp->rate == freq) {
  769. found = true;
  770. break;
  771. }
  772. }
  773. mutex_unlock(&opp_table->lock);
  774. if (found) {
  775. dev_pm_opp_put(opp);
  776. } else {
  777. dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
  778. __func__, freq);
  779. }
  780. dev_pm_opp_put_opp_table(opp_table);
  781. }
  782. EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
  783. struct dev_pm_opp *_opp_allocate(struct opp_table *table)
  784. {
  785. struct dev_pm_opp *opp;
  786. int count, supply_size;
  787. /* Allocate space for at least one supply */
  788. count = table->regulator_count ? table->regulator_count : 1;
  789. supply_size = sizeof(*opp->supplies) * count;
  790. /* allocate new OPP node and supplies structures */
  791. opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
  792. if (!opp)
  793. return NULL;
  794. /* Put the supplies at the end of the OPP structure as an empty array */
  795. opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
  796. INIT_LIST_HEAD(&opp->node);
  797. return opp;
  798. }
  799. static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
  800. struct opp_table *opp_table)
  801. {
  802. struct regulator *reg;
  803. int i;
  804. for (i = 0; i < opp_table->regulator_count; i++) {
  805. reg = opp_table->regulators[i];
  806. if (!regulator_is_supported_voltage(reg,
  807. opp->supplies[i].u_volt_min,
  808. opp->supplies[i].u_volt_max)) {
  809. pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
  810. __func__, opp->supplies[i].u_volt_min,
  811. opp->supplies[i].u_volt_max);
  812. return false;
  813. }
  814. }
  815. return true;
  816. }
/*
 * _opp_add() - Insert a fully-populated OPP into a device's OPP table.
 * @dev: device owning the table (used only for diagnostics)
 * @new_opp: OPP to insert; on success the table takes ownership
 * @opp_table: table to insert into
 *
 * Returns:
 * 0: On success. And appropriate error message for duplicate OPPs.
 * -EBUSY: For OPP with same freq/volt and is available. The callers of
 *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
 *  sure we don't print error messages unnecessarily if different parts of
 *  kernel try to initialize the OPP table.
 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
 *  should be considered an error by the callers of _opp_add().
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	struct list_head *head;
	int ret;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	mutex_lock(&opp_table->lock);
	head = &opp_table->opp_list;

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			/* Remember the last node with a lower rate */
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->supplies[0].u_volt,
			 opp->available, new_opp->rate,
			 new_opp->supplies[0].u_volt, new_opp->available);

		/* Should we compare voltages for all regulators here ? */
		ret = opp->available &&
		      new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;

		mutex_unlock(&opp_table->lock);
		return ret;
	}

	/* Let the platform assign a performance state, if it registered a hook */
	if (opp_table->get_pstate)
		new_opp->pstate = opp_table->get_pstate(dev, new_opp->rate);

	list_add(&new_opp->node, head);
	mutex_unlock(&opp_table->lock);

	new_opp->opp_table = opp_table;
	kref_init(&new_opp->kref);

	/* Get a reference to the OPP table */
	_get_opp_table_kref(opp_table);

	/* Debugfs registration failure is logged but not fatal */
	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	/* OPPs the regulators cannot deliver stay in the list, but disabled */
	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}
  880. /**
  881. * _opp_add_v1() - Allocate a OPP based on v1 bindings.
  882. * @opp_table: OPP table
  883. * @dev: device for which we do this operation
  884. * @freq: Frequency in Hz for this OPP
  885. * @u_volt: Voltage in uVolts for this OPP
  886. * @dynamic: Dynamically added OPPs.
  887. *
  888. * This function adds an opp definition to the opp table and returns status.
  889. * The opp is made available by default and it can be controlled using
  890. * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
  891. *
  892. * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
  893. * and freed by dev_pm_opp_of_remove_table.
  894. *
  895. * Return:
  896. * 0 On success OR
  897. * Duplicate OPPs (both freq and volt are same) and opp->available
  898. * -EEXIST Freq are same and volt are different OR
  899. * Duplicate OPPs (both freq and volt are same) and !opp->available
  900. * -ENOMEM Memory allocation failure
  901. */
  902. int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
  903. unsigned long freq, long u_volt, bool dynamic)
  904. {
  905. struct dev_pm_opp *new_opp;
  906. unsigned long tol;
  907. int ret;
  908. new_opp = _opp_allocate(opp_table);
  909. if (!new_opp)
  910. return -ENOMEM;
  911. /* populate the opp table */
  912. new_opp->rate = freq;
  913. tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
  914. new_opp->supplies[0].u_volt = u_volt;
  915. new_opp->supplies[0].u_volt_min = u_volt - tol;
  916. new_opp->supplies[0].u_volt_max = u_volt + tol;
  917. new_opp->available = true;
  918. new_opp->dynamic = dynamic;
  919. ret = _opp_add(dev, new_opp, opp_table);
  920. if (ret) {
  921. /* Don't return error for duplicate OPPs */
  922. if (ret == -EBUSY)
  923. ret = 0;
  924. goto free_opp;
  925. }
  926. /*
  927. * Notify the changes in the availability of the operable
  928. * frequency/voltage list.
  929. */
  930. blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
  931. return 0;
  932. free_opp:
  933. _opp_free(new_opp);
  934. return ret;
  935. }
  936. /**
  937. * dev_pm_opp_set_supported_hw() - Set supported platforms
  938. * @dev: Device for which supported-hw has to be set.
  939. * @versions: Array of hierarchy of versions to match.
  940. * @count: Number of elements in the array.
  941. *
  942. * This is required only for the V2 bindings, and it enables a platform to
  943. * specify the hierarchy of versions it supports. OPP layer will then enable
  944. * OPPs, which are available for those versions, based on its 'opp-supported-hw'
  945. * property.
  946. */
  947. struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
  948. const u32 *versions, unsigned int count)
  949. {
  950. struct opp_table *opp_table;
  951. int ret;
  952. opp_table = dev_pm_opp_get_opp_table(dev);
  953. if (!opp_table)
  954. return ERR_PTR(-ENOMEM);
  955. /* Make sure there are no concurrent readers while updating opp_table */
  956. WARN_ON(!list_empty(&opp_table->opp_list));
  957. /* Do we already have a version hierarchy associated with opp_table? */
  958. if (opp_table->supported_hw) {
  959. dev_err(dev, "%s: Already have supported hardware list\n",
  960. __func__);
  961. ret = -EBUSY;
  962. goto err;
  963. }
  964. opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
  965. GFP_KERNEL);
  966. if (!opp_table->supported_hw) {
  967. ret = -ENOMEM;
  968. goto err;
  969. }
  970. opp_table->supported_hw_count = count;
  971. return opp_table;
  972. err:
  973. dev_pm_opp_put_opp_table(opp_table);
  974. return ERR_PTR(ret);
  975. }
  976. EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
/**
 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
 * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
 *
 * This is required only for the V2 bindings, and is called for a matching
 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
 * will not be freed.
 */
void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
{
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Nothing to release without a prior matching set call */
	if (!opp_table->supported_hw) {
		pr_err("%s: Doesn't have supported hardware list\n",
		       __func__);
		return;
	}

	kfree(opp_table->supported_hw);
	opp_table->supported_hw = NULL;
	opp_table->supported_hw_count = 0;

	/* Drop the reference taken by dev_pm_opp_set_supported_hw() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
  1000. /**
  1001. * dev_pm_opp_set_prop_name() - Set prop-extn name
  1002. * @dev: Device for which the prop-name has to be set.
  1003. * @name: name to postfix to properties.
  1004. *
  1005. * This is required only for the V2 bindings, and it enables a platform to
  1006. * specify the extn to be used for certain property names. The properties to
  1007. * which the extension will apply are opp-microvolt and opp-microamp. OPP core
  1008. * should postfix the property name with -<name> while looking for them.
  1009. */
  1010. struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
  1011. {
  1012. struct opp_table *opp_table;
  1013. int ret;
  1014. opp_table = dev_pm_opp_get_opp_table(dev);
  1015. if (!opp_table)
  1016. return ERR_PTR(-ENOMEM);
  1017. /* Make sure there are no concurrent readers while updating opp_table */
  1018. WARN_ON(!list_empty(&opp_table->opp_list));
  1019. /* Do we already have a prop-name associated with opp_table? */
  1020. if (opp_table->prop_name) {
  1021. dev_err(dev, "%s: Already have prop-name %s\n", __func__,
  1022. opp_table->prop_name);
  1023. ret = -EBUSY;
  1024. goto err;
  1025. }
  1026. opp_table->prop_name = kstrdup(name, GFP_KERNEL);
  1027. if (!opp_table->prop_name) {
  1028. ret = -ENOMEM;
  1029. goto err;
  1030. }
  1031. return opp_table;
  1032. err:
  1033. dev_pm_opp_put_opp_table(opp_table);
  1034. return ERR_PTR(ret);
  1035. }
  1036. EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
/**
 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
 * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
 *
 * This is required only for the V2 bindings, and is called for a matching
 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
 * will not be freed.
 */
void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
{
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	/* Nothing to release without a prior matching set call */
	if (!opp_table->prop_name) {
		pr_err("%s: Doesn't have a prop-name\n", __func__);
		return;
	}

	kfree(opp_table->prop_name);
	opp_table->prop_name = NULL;

	/* Drop the reference taken by dev_pm_opp_set_prop_name() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
/*
 * Allocate the dev_pm_set_opp_data block passed to custom set_opp()
 * helpers, with trailing storage for both the old and the new supply
 * arrays (one dev_pm_opp_supply per regulator).
 */
static int _allocate_set_opp_data(struct opp_table *opp_table)
{
	struct dev_pm_set_opp_data *data;
	int len, count = opp_table->regulator_count;

	/* Regulators must already be set when this is called */
	if (WARN_ON(!count))
		return -EINVAL;

	/* space for set_opp_data */
	len = sizeof(*data);

	/* space for old_opp.supplies and new_opp.supplies */
	len += 2 * sizeof(struct dev_pm_opp_supply) * count;

	data = kzalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Both supply arrays live immediately after the struct itself */
	data->old_opp.supplies = (void *)(data + 1);
	data->new_opp.supplies = data->old_opp.supplies + count;

	opp_table->set_opp_data = data;

	return 0;
}
/* Free the block allocated by _allocate_set_opp_data(). */
static void _free_set_opp_data(struct opp_table *opp_table)
{
	kfree(opp_table->set_opp_data);
	opp_table->set_opp_data = NULL;
}
  1081. /**
  1082. * dev_pm_opp_set_regulators() - Set regulator names for the device
  1083. * @dev: Device for which regulator name is being set.
  1084. * @names: Array of pointers to the names of the regulator.
  1085. * @count: Number of regulators.
  1086. *
  1087. * In order to support OPP switching, OPP layer needs to know the name of the
  1088. * device's regulators, as the core would be required to switch voltages as
  1089. * well.
  1090. *
  1091. * This must be called before any OPPs are initialized for the device.
  1092. */
  1093. struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
  1094. const char * const names[],
  1095. unsigned int count)
  1096. {
  1097. struct opp_table *opp_table;
  1098. struct regulator *reg;
  1099. int ret, i;
  1100. opp_table = dev_pm_opp_get_opp_table(dev);
  1101. if (!opp_table)
  1102. return ERR_PTR(-ENOMEM);
  1103. /* This should be called before OPPs are initialized */
  1104. if (WARN_ON(!list_empty(&opp_table->opp_list))) {
  1105. ret = -EBUSY;
  1106. goto err;
  1107. }
  1108. /* Already have regulators set */
  1109. if (opp_table->regulators) {
  1110. ret = -EBUSY;
  1111. goto err;
  1112. }
  1113. opp_table->regulators = kmalloc_array(count,
  1114. sizeof(*opp_table->regulators),
  1115. GFP_KERNEL);
  1116. if (!opp_table->regulators) {
  1117. ret = -ENOMEM;
  1118. goto err;
  1119. }
  1120. for (i = 0; i < count; i++) {
  1121. reg = regulator_get_optional(dev, names[i]);
  1122. if (IS_ERR(reg)) {
  1123. ret = PTR_ERR(reg);
  1124. if (ret != -EPROBE_DEFER)
  1125. dev_err(dev, "%s: no regulator (%s) found: %d\n",
  1126. __func__, names[i], ret);
  1127. goto free_regulators;
  1128. }
  1129. opp_table->regulators[i] = reg;
  1130. }
  1131. opp_table->regulator_count = count;
  1132. /* Allocate block only once to pass to set_opp() routines */
  1133. ret = _allocate_set_opp_data(opp_table);
  1134. if (ret)
  1135. goto free_regulators;
  1136. return opp_table;
  1137. free_regulators:
  1138. while (i != 0)
  1139. regulator_put(opp_table->regulators[--i]);
  1140. kfree(opp_table->regulators);
  1141. opp_table->regulators = NULL;
  1142. opp_table->regulator_count = 0;
  1143. err:
  1144. dev_pm_opp_put_opp_table(opp_table);
  1145. return ERR_PTR(ret);
  1146. }
  1147. EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
  1148. /**
  1149. * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
  1150. * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
  1151. */
  1152. void dev_pm_opp_put_regulators(struct opp_table *opp_table)
  1153. {
  1154. int i;
  1155. if (!opp_table->regulators) {
  1156. pr_err("%s: Doesn't have regulators set\n", __func__);
  1157. return;
  1158. }
  1159. /* Make sure there are no concurrent readers while updating opp_table */
  1160. WARN_ON(!list_empty(&opp_table->opp_list));
  1161. for (i = opp_table->regulator_count - 1; i >= 0; i--)
  1162. regulator_put(opp_table->regulators[i]);
  1163. _free_set_opp_data(opp_table);
  1164. kfree(opp_table->regulators);
  1165. opp_table->regulators = NULL;
  1166. opp_table->regulator_count = 0;
  1167. dev_pm_opp_put_opp_table(opp_table);
  1168. }
  1169. EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
  1170. /**
  1171. * dev_pm_opp_set_clkname() - Set clk name for the device
  1172. * @dev: Device for which clk name is being set.
  1173. * @name: Clk name.
  1174. *
  1175. * In order to support OPP switching, OPP layer needs to get pointer to the
  1176. * clock for the device. Simple cases work fine without using this routine (i.e.
  1177. * by passing connection-id as NULL), but for a device with multiple clocks
  1178. * available, the OPP core needs to know the exact name of the clk to use.
  1179. *
  1180. * This must be called before any OPPs are initialized for the device.
  1181. */
  1182. struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
  1183. {
  1184. struct opp_table *opp_table;
  1185. int ret;
  1186. opp_table = dev_pm_opp_get_opp_table(dev);
  1187. if (!opp_table)
  1188. return ERR_PTR(-ENOMEM);
  1189. /* This should be called before OPPs are initialized */
  1190. if (WARN_ON(!list_empty(&opp_table->opp_list))) {
  1191. ret = -EBUSY;
  1192. goto err;
  1193. }
  1194. /* Already have default clk set, free it */
  1195. if (!IS_ERR(opp_table->clk))
  1196. clk_put(opp_table->clk);
  1197. /* Find clk for the device */
  1198. opp_table->clk = clk_get(dev, name);
  1199. if (IS_ERR(opp_table->clk)) {
  1200. ret = PTR_ERR(opp_table->clk);
  1201. if (ret != -EPROBE_DEFER) {
  1202. dev_err(dev, "%s: Couldn't find clock: %d\n", __func__,
  1203. ret);
  1204. }
  1205. goto err;
  1206. }
  1207. return opp_table;
  1208. err:
  1209. dev_pm_opp_put_opp_table(opp_table);
  1210. return ERR_PTR(ret);
  1211. }
  1212. EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
/**
 * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
 * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
 */
void dev_pm_opp_put_clkname(struct opp_table *opp_table)
{
	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	clk_put(opp_table->clk);
	/* Poison the pointer so later users see an error, not a stale clk */
	opp_table->clk = ERR_PTR(-EINVAL);

	/* Drop the reference taken by dev_pm_opp_set_clkname() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
  1226. /**
  1227. * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
  1228. * @dev: Device for which the helper is getting registered.
  1229. * @set_opp: Custom set OPP helper.
  1230. *
  1231. * This is useful to support complex platforms (like platforms with multiple
  1232. * regulators per device), instead of the generic OPP set rate helper.
  1233. *
  1234. * This must be called before any OPPs are initialized for the device.
  1235. */
  1236. struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
  1237. int (*set_opp)(struct dev_pm_set_opp_data *data))
  1238. {
  1239. struct opp_table *opp_table;
  1240. int ret;
  1241. if (!set_opp)
  1242. return ERR_PTR(-EINVAL);
  1243. opp_table = dev_pm_opp_get_opp_table(dev);
  1244. if (!opp_table)
  1245. return ERR_PTR(-ENOMEM);
  1246. /* This should be called before OPPs are initialized */
  1247. if (WARN_ON(!list_empty(&opp_table->opp_list))) {
  1248. ret = -EBUSY;
  1249. goto err;
  1250. }
  1251. /* Already have custom set_opp helper */
  1252. if (WARN_ON(opp_table->set_opp)) {
  1253. ret = -EBUSY;
  1254. goto err;
  1255. }
  1256. opp_table->set_opp = set_opp;
  1257. return opp_table;
  1258. err:
  1259. dev_pm_opp_put_opp_table(opp_table);
  1260. return ERR_PTR(ret);
  1261. }
  1262. EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
/**
 * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
 *					    set_opp helper
 * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
 *
 * Release resources blocked for platform specific set_opp helper.
 */
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
{
	/* Nothing to release without a prior matching register call */
	if (!opp_table->set_opp) {
		pr_err("%s: Doesn't have custom set_opp helper set\n",
		       __func__);
		return;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	opp_table->set_opp = NULL;

	/* Drop the reference taken by dev_pm_opp_register_set_opp_helper() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
  1283. /**
  1284. * dev_pm_opp_register_get_pstate_helper() - Register get_pstate() helper.
  1285. * @dev: Device for which the helper is getting registered.
  1286. * @get_pstate: Helper.
  1287. *
  1288. * TODO: Remove this callback after the same information is available via Device
  1289. * Tree.
  1290. *
  1291. * This allows a platform to initialize the performance states of individual
  1292. * OPPs for its devices, until we get similar information directly from DT.
  1293. *
  1294. * This must be called before the OPPs are initialized for the device.
  1295. */
  1296. struct opp_table *dev_pm_opp_register_get_pstate_helper(struct device *dev,
  1297. int (*get_pstate)(struct device *dev, unsigned long rate))
  1298. {
  1299. struct opp_table *opp_table;
  1300. int ret;
  1301. if (!get_pstate)
  1302. return ERR_PTR(-EINVAL);
  1303. opp_table = dev_pm_opp_get_opp_table(dev);
  1304. if (!opp_table)
  1305. return ERR_PTR(-ENOMEM);
  1306. /* This should be called before OPPs are initialized */
  1307. if (WARN_ON(!list_empty(&opp_table->opp_list))) {
  1308. ret = -EBUSY;
  1309. goto err;
  1310. }
  1311. /* Already have genpd_performance_state set */
  1312. if (WARN_ON(opp_table->genpd_performance_state)) {
  1313. ret = -EBUSY;
  1314. goto err;
  1315. }
  1316. opp_table->genpd_performance_state = true;
  1317. opp_table->get_pstate = get_pstate;
  1318. return opp_table;
  1319. err:
  1320. dev_pm_opp_put_opp_table(opp_table);
  1321. return ERR_PTR(ret);
  1322. }
  1323. EXPORT_SYMBOL_GPL(dev_pm_opp_register_get_pstate_helper);
/**
 * dev_pm_opp_unregister_get_pstate_helper() - Releases resources blocked for
 *					       get_pstate() helper
 * @opp_table: OPP table returned from dev_pm_opp_register_get_pstate_helper().
 *
 * Release resources blocked for platform specific get_pstate() helper.
 */
void dev_pm_opp_unregister_get_pstate_helper(struct opp_table *opp_table)
{
	/* Nothing to release without a prior matching register call */
	if (!opp_table->genpd_performance_state) {
		pr_err("%s: Doesn't have performance states set\n",
		       __func__);
		return;
	}

	/* Make sure there are no concurrent readers while updating opp_table */
	WARN_ON(!list_empty(&opp_table->opp_list));

	opp_table->genpd_performance_state = false;
	opp_table->get_pstate = NULL;

	/* Drop the reference taken by dev_pm_opp_register_get_pstate_helper() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_get_pstate_helper);
  1345. /**
  1346. * dev_pm_opp_add() - Add an OPP table from a table definitions
  1347. * @dev: device for which we do this operation
  1348. * @freq: Frequency in Hz for this OPP
  1349. * @u_volt: Voltage in uVolts for this OPP
  1350. *
  1351. * This function adds an opp definition to the opp table and returns status.
  1352. * The opp is made available by default and it can be controlled using
  1353. * dev_pm_opp_enable/disable functions.
  1354. *
  1355. * Return:
  1356. * 0 On success OR
  1357. * Duplicate OPPs (both freq and volt are same) and opp->available
  1358. * -EEXIST Freq are same and volt are different OR
  1359. * Duplicate OPPs (both freq and volt are same) and !opp->available
  1360. * -ENOMEM Memory allocation failure
  1361. */
  1362. int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
  1363. {
  1364. struct opp_table *opp_table;
  1365. int ret;
  1366. opp_table = dev_pm_opp_get_opp_table(dev);
  1367. if (!opp_table)
  1368. return -ENOMEM;
  1369. ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
  1370. dev_pm_opp_put_opp_table(opp_table);
  1371. return ret;
  1372. }
  1373. EXPORT_SYMBOL_GPL(dev_pm_opp_add);
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP, opp_{enable,disable} share a common logic
 * which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	mutex_lock(&opp_table->lock);

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}

	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;

	opp->available = availability_req;

	/* Hold a reference so the OPP survives until the notifiers have run */
	dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);

	/* Notify the change of the OPP availability */
	if (availability_req)
		blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
					     opp);
	else
		blocking_notifier_call_chain(&opp_table->head,
					     OPP_EVENT_DISABLE, opp);

	dev_pm_opp_put(opp);
	goto put_table;

unlock:
	mutex_unlock(&opp_table->lock);
put_table:
	dev_pm_opp_put_opp_table(opp_table);
	return r;
}
/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used for users an OPP available
 * after being temporarily made unavailable with dev_pm_opp_disable.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	/* Thin wrapper: the shared logic lives in _opp_set_availability() */
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	/* Thin wrapper: the shared logic lives in _opp_set_availability() */
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
  1470. /**
  1471. * dev_pm_opp_register_notifier() - Register OPP notifier for the device
  1472. * @dev: Device for which notifier needs to be registered
  1473. * @nb: Notifier block to be registered
  1474. *
  1475. * Return: 0 on success or a negative error value.
  1476. */
  1477. int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
  1478. {
  1479. struct opp_table *opp_table;
  1480. int ret;
  1481. opp_table = _find_opp_table(dev);
  1482. if (IS_ERR(opp_table))
  1483. return PTR_ERR(opp_table);
  1484. ret = blocking_notifier_chain_register(&opp_table->head, nb);
  1485. dev_pm_opp_put_opp_table(opp_table);
  1486. return ret;
  1487. }
  1488. EXPORT_SYMBOL(dev_pm_opp_register_notifier);
  1489. /**
  1490. * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
  1491. * @dev: Device for which notifier needs to be unregistered
  1492. * @nb: Notifier block to be unregistered
  1493. *
  1494. * Return: 0 on success or a negative error value.
  1495. */
  1496. int dev_pm_opp_unregister_notifier(struct device *dev,
  1497. struct notifier_block *nb)
  1498. {
  1499. struct opp_table *opp_table;
  1500. int ret;
  1501. opp_table = _find_opp_table(dev);
  1502. if (IS_ERR(opp_table))
  1503. return PTR_ERR(opp_table);
  1504. ret = blocking_notifier_chain_unregister(&opp_table->head, nb);
  1505. dev_pm_opp_put_opp_table(opp_table);
  1506. return ret;
  1507. }
  1508. EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
/*
 * Free OPPs either created using static entries present in DT or even the
 * dynamically added entries based on remove_all param.
 */
void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
			      bool remove_all)
{
	struct dev_pm_opp *opp, *tmp;

	/* Find if opp_table manages a single device */
	if (list_is_singular(&opp_table->dev_list)) {
		/* Free static OPPs (dynamic ones too when remove_all is set) */
		list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
			if (remove_all || !opp->dynamic)
				dev_pm_opp_put(opp);
		}

		/*
		 * The OPP table is getting removed, drop the performance state
		 * constraints.
		 */
		if (opp_table->genpd_performance_state)
			dev_pm_genpd_set_performance_state(dev, 0);
	} else {
		/* Other devices share this table: only unlink this device */
		_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
	}
}
  1534. void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
  1535. {
  1536. struct opp_table *opp_table;
  1537. /* Check for existing table for 'dev' */
  1538. opp_table = _find_opp_table(dev);
  1539. if (IS_ERR(opp_table)) {
  1540. int error = PTR_ERR(opp_table);
  1541. if (error != -ENODEV)
  1542. WARN(1, "%s: opp_table: %d\n",
  1543. IS_ERR_OR_NULL(dev) ?
  1544. "Invalid device" : dev_name(dev),
  1545. error);
  1546. return;
  1547. }
  1548. _dev_pm_opp_remove_table(opp_table, dev, remove_all);
  1549. dev_pm_opp_put_opp_table(opp_table);
  1550. }
/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev: device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	/* remove_all = true: dynamic OPPs are freed as well as static ones */
	_dev_pm_opp_find_and_remove_table(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);