core.c 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928
  1. /*
  2. * Generic OPP Interface
  3. *
  4. * Copyright (C) 2009-2010 Texas Instruments Incorporated.
  5. * Nishanth Menon
  6. * Romit Dasgupta
  7. * Kevin Hilman
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/clk.h>
  15. #include <linux/errno.h>
  16. #include <linux/err.h>
  17. #include <linux/slab.h>
  18. #include <linux/device.h>
  19. #include <linux/export.h>
  20. #include <linux/regulator/consumer.h>
  21. #include "opp.h"
  22. /*
  23. * The root of the list of all opp-tables. All opp_table structures branch off
  24. * from here, with each opp_table containing the list of opps it supports in
  25. * various states of availability.
  26. */
  27. LIST_HEAD(opp_tables);
  28. /* Lock to allow exclusive modification to the device and opp lists */
  29. DEFINE_MUTEX(opp_table_lock);
/*
 * Debug-time assertion: readers of the OPP lists must hold either
 * rcu_read_lock() or opp_table_lock. Expands to a lockdep warning when
 * neither protection is held.
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&opp_table_lock),		\
			 "Missing rcu_read_lock() or "			\
			 "opp_table_lock protection");			\
} while (0)
  37. static struct opp_device *_find_opp_dev(const struct device *dev,
  38. struct opp_table *opp_table)
  39. {
  40. struct opp_device *opp_dev;
  41. list_for_each_entry(opp_dev, &opp_table->dev_list, node)
  42. if (opp_dev->dev == dev)
  43. return opp_dev;
  44. return NULL;
  45. }
  46. /**
  47. * _find_opp_table() - find opp_table struct using device pointer
  48. * @dev: device pointer used to lookup OPP table
  49. *
  50. * Search OPP table for one containing matching device. Does a RCU reader
  51. * operation to grab the pointer needed.
  52. *
  53. * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
  54. * -EINVAL based on type of error.
  55. *
  56. * Locking: For readers, this function must be called under rcu_read_lock().
  57. * opp_table is a RCU protected pointer, which means that opp_table is valid
  58. * as long as we are under RCU lock.
  59. *
  60. * For Writers, this function must be called with opp_table_lock held.
  61. */
  62. struct opp_table *_find_opp_table(struct device *dev)
  63. {
  64. struct opp_table *opp_table;
  65. opp_rcu_lockdep_assert();
  66. if (IS_ERR_OR_NULL(dev)) {
  67. pr_err("%s: Invalid parameters\n", __func__);
  68. return ERR_PTR(-EINVAL);
  69. }
  70. list_for_each_entry_rcu(opp_table, &opp_tables, node)
  71. if (_find_opp_dev(dev, opp_table))
  72. return opp_table;
  73. return ERR_PTR(-ENODEV);
  74. }
  75. /**
  76. * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
  77. * @opp: opp for which voltage has to be returned for
  78. *
  79. * Return: voltage in micro volt corresponding to the opp, else
  80. * return 0
  81. *
  82. * This is useful only for devices with single power supply.
  83. *
  84. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  85. * protected pointer. This means that opp which could have been fetched by
  86. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  87. * under RCU lock. The pointer returned by the opp_find_freq family must be
  88. * used in the same section as the usage of this function with the pointer
  89. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  90. * pointer.
  91. */
  92. unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
  93. {
  94. struct dev_pm_opp *tmp_opp;
  95. unsigned long v = 0;
  96. opp_rcu_lockdep_assert();
  97. tmp_opp = rcu_dereference(opp);
  98. if (IS_ERR_OR_NULL(tmp_opp))
  99. pr_err("%s: Invalid parameters\n", __func__);
  100. else
  101. v = tmp_opp->supplies[0].u_volt;
  102. return v;
  103. }
  104. EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
  105. /**
  106. * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  107. * @opp: opp for which frequency has to be returned for
  108. *
  109. * Return: frequency in hertz corresponding to the opp, else
  110. * return 0
  111. *
  112. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  113. * protected pointer. This means that opp which could have been fetched by
  114. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  115. * under RCU lock. The pointer returned by the opp_find_freq family must be
  116. * used in the same section as the usage of this function with the pointer
  117. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  118. * pointer.
  119. */
  120. unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
  121. {
  122. struct dev_pm_opp *tmp_opp;
  123. unsigned long f = 0;
  124. opp_rcu_lockdep_assert();
  125. tmp_opp = rcu_dereference(opp);
  126. if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
  127. pr_err("%s: Invalid parameters\n", __func__);
  128. else
  129. f = tmp_opp->rate;
  130. return f;
  131. }
  132. EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
  133. /**
  134. * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
  135. * @opp: opp for which turbo mode is being verified
  136. *
  137. * Turbo OPPs are not for normal use, and can be enabled (under certain
  138. * conditions) for short duration of times to finish high throughput work
  139. * quickly. Running on them for longer times may overheat the chip.
  140. *
  141. * Return: true if opp is turbo opp, else false.
  142. *
  143. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  144. * protected pointer. This means that opp which could have been fetched by
  145. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  146. * under RCU lock. The pointer returned by the opp_find_freq family must be
  147. * used in the same section as the usage of this function with the pointer
  148. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  149. * pointer.
  150. */
  151. bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
  152. {
  153. struct dev_pm_opp *tmp_opp;
  154. opp_rcu_lockdep_assert();
  155. tmp_opp = rcu_dereference(opp);
  156. if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
  157. pr_err("%s: Invalid parameters\n", __func__);
  158. return false;
  159. }
  160. return tmp_opp->turbo;
  161. }
  162. EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
  163. /**
  164. * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
  165. * @dev: device for which we do this operation
  166. *
  167. * Return: This function returns the max clock latency in nanoseconds.
  168. *
  169. * Locking: This function takes rcu_read_lock().
  170. */
  171. unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
  172. {
  173. struct opp_table *opp_table;
  174. unsigned long clock_latency_ns;
  175. rcu_read_lock();
  176. opp_table = _find_opp_table(dev);
  177. if (IS_ERR(opp_table))
  178. clock_latency_ns = 0;
  179. else
  180. clock_latency_ns = opp_table->clock_latency_ns_max;
  181. rcu_read_unlock();
  182. return clock_latency_ns;
  183. }
  184. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
  185. static int _get_regulator_count(struct device *dev)
  186. {
  187. struct opp_table *opp_table;
  188. int count;
  189. rcu_read_lock();
  190. opp_table = _find_opp_table(dev);
  191. if (!IS_ERR(opp_table))
  192. count = opp_table->regulator_count;
  193. else
  194. count = 0;
  195. rcu_read_unlock();
  196. return count;
  197. }
  198. /**
  199. * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
  200. * @dev: device for which we do this operation
  201. *
  202. * Return: This function returns the max voltage latency in nanoseconds.
  203. *
  204. * Locking: This function takes rcu_read_lock().
  205. */
  206. unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
  207. {
  208. struct opp_table *opp_table;
  209. struct dev_pm_opp *opp;
  210. struct regulator *reg, **regulators;
  211. unsigned long latency_ns = 0;
  212. int ret, i, count;
  213. struct {
  214. unsigned long min;
  215. unsigned long max;
  216. } *uV;
  217. count = _get_regulator_count(dev);
  218. /* Regulator may not be required for the device */
  219. if (!count)
  220. return 0;
  221. regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL);
  222. if (!regulators)
  223. return 0;
  224. uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
  225. if (!uV)
  226. goto free_regulators;
  227. rcu_read_lock();
  228. opp_table = _find_opp_table(dev);
  229. if (IS_ERR(opp_table)) {
  230. rcu_read_unlock();
  231. goto free_uV;
  232. }
  233. memcpy(regulators, opp_table->regulators, count * sizeof(*regulators));
  234. for (i = 0; i < count; i++) {
  235. uV[i].min = ~0;
  236. uV[i].max = 0;
  237. list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
  238. if (!opp->available)
  239. continue;
  240. if (opp->supplies[i].u_volt_min < uV[i].min)
  241. uV[i].min = opp->supplies[i].u_volt_min;
  242. if (opp->supplies[i].u_volt_max > uV[i].max)
  243. uV[i].max = opp->supplies[i].u_volt_max;
  244. }
  245. }
  246. rcu_read_unlock();
  247. /*
  248. * The caller needs to ensure that opp_table (and hence the regulator)
  249. * isn't freed, while we are executing this routine.
  250. */
  251. for (i = 0; reg = regulators[i], i < count; i++) {
  252. ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
  253. if (ret > 0)
  254. latency_ns += ret * 1000;
  255. }
  256. free_uV:
  257. kfree(uV);
  258. free_regulators:
  259. kfree(regulators);
  260. return latency_ns;
  261. }
  262. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
  263. /**
  264. * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
  265. * nanoseconds
  266. * @dev: device for which we do this operation
  267. *
  268. * Return: This function returns the max transition latency, in nanoseconds, to
  269. * switch from one OPP to other.
  270. *
  271. * Locking: This function takes rcu_read_lock().
  272. */
  273. unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
  274. {
  275. return dev_pm_opp_get_max_volt_latency(dev) +
  276. dev_pm_opp_get_max_clock_latency(dev);
  277. }
  278. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
  279. /**
  280. * dev_pm_opp_get_suspend_opp() - Get suspend opp
  281. * @dev: device for which we do this operation
  282. *
  283. * Return: This function returns pointer to the suspend opp if it is
  284. * defined and available, otherwise it returns NULL.
  285. *
  286. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  287. * protected pointer. The reason for the same is that the opp pointer which is
  288. * returned will remain valid for use with opp_get_{voltage, freq} only while
  289. * under the locked area. The pointer returned must be used prior to unlocking
  290. * with rcu_read_unlock() to maintain the integrity of the pointer.
  291. */
  292. struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
  293. {
  294. struct opp_table *opp_table;
  295. opp_rcu_lockdep_assert();
  296. opp_table = _find_opp_table(dev);
  297. if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
  298. !opp_table->suspend_opp->available)
  299. return NULL;
  300. return opp_table->suspend_opp;
  301. }
  302. EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
  303. /**
  304. * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
  305. * @dev: device for which we do this operation
  306. *
  307. * Return: This function returns the number of available opps if there are any,
  308. * else returns 0 if none or the corresponding error value.
  309. *
  310. * Locking: This function takes rcu_read_lock().
  311. */
  312. int dev_pm_opp_get_opp_count(struct device *dev)
  313. {
  314. struct opp_table *opp_table;
  315. struct dev_pm_opp *temp_opp;
  316. int count = 0;
  317. rcu_read_lock();
  318. opp_table = _find_opp_table(dev);
  319. if (IS_ERR(opp_table)) {
  320. count = PTR_ERR(opp_table);
  321. dev_err(dev, "%s: OPP table not found (%d)\n",
  322. __func__, count);
  323. goto out_unlock;
  324. }
  325. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  326. if (temp_opp->available)
  327. count++;
  328. }
  329. out_unlock:
  330. rcu_read_unlock();
  331. return count;
  332. }
  333. EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
  334. /**
  335. * dev_pm_opp_find_freq_exact() - search for an exact frequency
  336. * @dev: device for which we do this operation
  337. * @freq: frequency to search for
  338. * @available: true/false - match for available opp
  339. *
  340. * Return: Searches for exact match in the opp table and returns pointer to the
  341. * matching opp if found, else returns ERR_PTR in case of error and should
  342. * be handled using IS_ERR. Error return values can be:
  343. * EINVAL: for bad pointer
  344. * ERANGE: no match found for search
  345. * ENODEV: if device not found in list of registered devices
  346. *
  347. * Note: available is a modifier for the search. if available=true, then the
  348. * match is for exact matching frequency and is available in the stored OPP
  349. * table. if false, the match is for exact frequency which is not available.
  350. *
  351. * This provides a mechanism to enable an opp which is not available currently
  352. * or the opposite as well.
  353. *
  354. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  355. * protected pointer. The reason for the same is that the opp pointer which is
  356. * returned will remain valid for use with opp_get_{voltage, freq} only while
  357. * under the locked area. The pointer returned must be used prior to unlocking
  358. * with rcu_read_unlock() to maintain the integrity of the pointer.
  359. */
  360. struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
  361. unsigned long freq,
  362. bool available)
  363. {
  364. struct opp_table *opp_table;
  365. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  366. opp_rcu_lockdep_assert();
  367. opp_table = _find_opp_table(dev);
  368. if (IS_ERR(opp_table)) {
  369. int r = PTR_ERR(opp_table);
  370. dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
  371. return ERR_PTR(r);
  372. }
  373. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  374. if (temp_opp->available == available &&
  375. temp_opp->rate == freq) {
  376. opp = temp_opp;
  377. break;
  378. }
  379. }
  380. return opp;
  381. }
  382. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
  383. static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
  384. unsigned long *freq)
  385. {
  386. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  387. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  388. if (temp_opp->available && temp_opp->rate >= *freq) {
  389. opp = temp_opp;
  390. *freq = opp->rate;
  391. break;
  392. }
  393. }
  394. return opp;
  395. }
  396. /**
  397. * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
  398. * @dev: device for which we do this operation
  399. * @freq: Start frequency
  400. *
  401. * Search for the matching ceil *available* OPP from a starting freq
  402. * for a device.
  403. *
  404. * Return: matching *opp and refreshes *freq accordingly, else returns
  405. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  406. * values can be:
  407. * EINVAL: for bad pointer
  408. * ERANGE: no match found for search
  409. * ENODEV: if device not found in list of registered devices
  410. *
  411. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  412. * protected pointer. The reason for the same is that the opp pointer which is
  413. * returned will remain valid for use with opp_get_{voltage, freq} only while
  414. * under the locked area. The pointer returned must be used prior to unlocking
  415. * with rcu_read_unlock() to maintain the integrity of the pointer.
  416. */
  417. struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
  418. unsigned long *freq)
  419. {
  420. struct opp_table *opp_table;
  421. opp_rcu_lockdep_assert();
  422. if (!dev || !freq) {
  423. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  424. return ERR_PTR(-EINVAL);
  425. }
  426. opp_table = _find_opp_table(dev);
  427. if (IS_ERR(opp_table))
  428. return ERR_CAST(opp_table);
  429. return _find_freq_ceil(opp_table, freq);
  430. }
  431. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
  432. /**
  433. * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
  434. * @dev: device for which we do this operation
  435. * @freq: Start frequency
  436. *
  437. * Search for the matching floor *available* OPP from a starting freq
  438. * for a device.
  439. *
  440. * Return: matching *opp and refreshes *freq accordingly, else returns
  441. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  442. * values can be:
  443. * EINVAL: for bad pointer
  444. * ERANGE: no match found for search
  445. * ENODEV: if device not found in list of registered devices
  446. *
  447. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  448. * protected pointer. The reason for the same is that the opp pointer which is
  449. * returned will remain valid for use with opp_get_{voltage, freq} only while
  450. * under the locked area. The pointer returned must be used prior to unlocking
  451. * with rcu_read_unlock() to maintain the integrity of the pointer.
  452. */
  453. struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
  454. unsigned long *freq)
  455. {
  456. struct opp_table *opp_table;
  457. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  458. opp_rcu_lockdep_assert();
  459. if (!dev || !freq) {
  460. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  461. return ERR_PTR(-EINVAL);
  462. }
  463. opp_table = _find_opp_table(dev);
  464. if (IS_ERR(opp_table))
  465. return ERR_CAST(opp_table);
  466. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  467. if (temp_opp->available) {
  468. /* go to the next node, before choosing prev */
  469. if (temp_opp->rate > *freq)
  470. break;
  471. else
  472. opp = temp_opp;
  473. }
  474. }
  475. if (!IS_ERR(opp))
  476. *freq = opp->rate;
  477. return opp;
  478. }
  479. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
  480. /*
  481. * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
  482. * while clk returned here is used.
  483. */
  484. static struct clk *_get_opp_clk(struct device *dev)
  485. {
  486. struct opp_table *opp_table;
  487. struct clk *clk;
  488. rcu_read_lock();
  489. opp_table = _find_opp_table(dev);
  490. if (IS_ERR(opp_table)) {
  491. dev_err(dev, "%s: device opp doesn't exist\n", __func__);
  492. clk = ERR_CAST(opp_table);
  493. goto unlock;
  494. }
  495. clk = opp_table->clk;
  496. if (IS_ERR(clk))
  497. dev_err(dev, "%s: No clock available for the device\n",
  498. __func__);
  499. unlock:
  500. rcu_read_unlock();
  501. return clk;
  502. }
  503. static int _set_opp_voltage(struct device *dev, struct regulator *reg,
  504. struct dev_pm_opp_supply *supply)
  505. {
  506. int ret;
  507. /* Regulator not available for device */
  508. if (IS_ERR(reg)) {
  509. dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
  510. PTR_ERR(reg));
  511. return 0;
  512. }
  513. dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
  514. supply->u_volt_min, supply->u_volt, supply->u_volt_max);
  515. ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
  516. supply->u_volt, supply->u_volt_max);
  517. if (ret)
  518. dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
  519. __func__, supply->u_volt_min, supply->u_volt,
  520. supply->u_volt_max, ret);
  521. return ret;
  522. }
  523. static inline int
  524. _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
  525. unsigned long old_freq, unsigned long freq)
  526. {
  527. int ret;
  528. ret = clk_set_rate(clk, freq);
  529. if (ret) {
  530. dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
  531. ret);
  532. }
  533. return ret;
  534. }
  535. static int _generic_set_opp(struct dev_pm_set_opp_data *data)
  536. {
  537. struct dev_pm_opp_supply *old_supply = data->old_opp.supplies;
  538. struct dev_pm_opp_supply *new_supply = data->new_opp.supplies;
  539. unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
  540. struct regulator *reg = data->regulators[0];
  541. struct device *dev= data->dev;
  542. int ret;
  543. /* This function only supports single regulator per device */
  544. if (WARN_ON(data->regulator_count > 1)) {
  545. dev_err(dev, "multiple regulators are not supported\n");
  546. return -EINVAL;
  547. }
  548. /* Scaling up? Scale voltage before frequency */
  549. if (freq > old_freq) {
  550. ret = _set_opp_voltage(dev, reg, new_supply);
  551. if (ret)
  552. goto restore_voltage;
  553. }
  554. /* Change frequency */
  555. ret = _generic_set_opp_clk_only(dev, data->clk, old_freq, freq);
  556. if (ret)
  557. goto restore_voltage;
  558. /* Scaling down? Scale voltage after frequency */
  559. if (freq < old_freq) {
  560. ret = _set_opp_voltage(dev, reg, new_supply);
  561. if (ret)
  562. goto restore_freq;
  563. }
  564. return 0;
  565. restore_freq:
  566. if (_generic_set_opp_clk_only(dev, data->clk, freq, old_freq))
  567. dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
  568. __func__, old_freq);
  569. restore_voltage:
  570. /* This shouldn't harm even if the voltages weren't updated earlier */
  571. if (old_supply->u_volt)
  572. _set_opp_voltage(dev, reg, old_supply);
  573. return ret;
  574. }
/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies and clock source to the levels specified
 * by the OPP corresponding to the target_freq.
 *
 * Return: 0 on success, or a negative error code when the clock, OPP table
 * or matching OPP cannot be found or the switch fails.
 *
 * Locking: This function takes rcu_read_lock().
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq, old_freq;
	int (*set_opp)(struct dev_pm_set_opp_data *data);
	struct dev_pm_opp *old_opp, *opp;
	struct regulator **regulators;
	struct dev_pm_set_opp_data *data;
	struct clk *clk;
	int ret, size;

	if (unlikely(!target_freq)) {
		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
			target_freq);
		return -EINVAL;
	}

	clk = _get_opp_clk(dev);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Snap the target to a rate the clock framework can provide. */
	freq = clk_round_rate(clk, target_freq);
	if ((long)freq <= 0)
		freq = target_freq;

	old_freq = clk_get_rate(clk);

	/* Return early if nothing to do */
	if (old_freq == freq) {
		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
			__func__, freq);
		return 0;
	}

	rcu_read_lock();

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		rcu_read_unlock();
		return PTR_ERR(opp_table);
	}

	/*
	 * Failing to find the current OPP is not fatal: the old supplies
	 * are zeroed below and only the new OPP's values are applied.
	 */
	old_opp = _find_freq_ceil(opp_table, &old_freq);
	if (IS_ERR(old_opp)) {
		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
			__func__, old_freq, PTR_ERR(old_opp));
	}

	opp = _find_freq_ceil(opp_table, &freq);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
			__func__, freq, ret);
		rcu_read_unlock();
		return ret;
	}

	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
		old_freq, freq);

	regulators = opp_table->regulators;

	/* Only frequency scaling */
	if (!regulators) {
		rcu_read_unlock();
		return _generic_set_opp_clk_only(dev, clk, old_freq, freq);
	}

	/* A platform-provided set_opp hook takes precedence over the generic one. */
	if (opp_table->set_opp)
		set_opp = opp_table->set_opp;
	else
		set_opp = _generic_set_opp;

	/*
	 * Copy everything the helper needs out of the table while still
	 * under RCU, so set_opp() can run after rcu_read_unlock().
	 */
	data = opp_table->set_opp_data;
	data->regulators = regulators;
	data->regulator_count = opp_table->regulator_count;
	data->clk = clk;
	data->dev = dev;

	data->old_opp.rate = old_freq;
	size = sizeof(*opp->supplies) * opp_table->regulator_count;
	if (IS_ERR(old_opp))
		memset(data->old_opp.supplies, 0, size);
	else
		memcpy(data->old_opp.supplies, old_opp->supplies, size);

	data->new_opp.rate = freq;
	memcpy(data->new_opp.supplies, opp->supplies, size);

	rcu_read_unlock();

	return set_opp(data);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
/* OPP-dev Helpers */

/*
 * SRCU callback used by _remove_opp_dev(). NOTE(review): this defers the
 * final kfree() once more via kfree_rcu() instead of freeing directly —
 * presumably to also wait out regular-RCU readers of the dev_list; confirm
 * against the list traversal primitives used elsewhere in this file.
 */
static void _kfree_opp_dev_rcu(struct rcu_head *head)
{
	struct opp_device *opp_dev;

	opp_dev = container_of(head, struct opp_device, rcu_head);
	kfree_rcu(opp_dev, rcu_head);
}
/*
 * Unlink an opp_device from its OPP table and schedule it for freeing.
 *
 * Debugfs entries are torn down first, then the node leaves
 * opp_table->dev_list; the memory itself is released only after an SRCU
 * grace period so concurrent readers stay safe.
 *
 * Caller is expected to hold the table-modification lock.
 */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
		  _kfree_opp_dev_rcu);
}
  677. struct opp_device *_add_opp_dev(const struct device *dev,
  678. struct opp_table *opp_table)
  679. {
  680. struct opp_device *opp_dev;
  681. int ret;
  682. opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
  683. if (!opp_dev)
  684. return NULL;
  685. /* Initialize opp-dev */
  686. opp_dev->dev = dev;
  687. list_add_rcu(&opp_dev->node, &opp_table->dev_list);
  688. /* Create debugfs entries for the opp_table */
  689. ret = opp_debug_register(opp_dev, opp_table);
  690. if (ret)
  691. dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
  692. __func__, ret);
  693. return opp_dev;
  694. }
/**
 * _add_opp_table() - Find OPP table or allocate a new one
 * @dev: device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid opp_table pointer if success, else NULL.
 */
static struct opp_table *_add_opp_table(struct device *dev)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (!IS_ERR(opp_table))
		return opp_table;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return NULL;

	INIT_LIST_HEAD(&opp_table->dev_list);

	/* Attach 'dev' to the fresh table (also creates debugfs entries) */
	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		/* Table was never published, so a direct kfree() is fine */
		kfree(opp_table);
		return NULL;
	}

	_of_init_opp_table(opp_table, dev);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);
		/*
		 * A missing clock is tolerated: opp_table->clk keeps the
		 * ERR_PTR and users must check it before use.
		 */
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

	srcu_init_notifier_head(&opp_table->srcu_head);
	INIT_LIST_HEAD(&opp_table->opp_list);

	/*
	 * Secure the device table modification: publish the fully
	 * initialized table to RCU readers only as the last step.
	 */
	list_add_rcu(&opp_table->node, &opp_tables);
	return opp_table;
}
/**
 * _kfree_device_rcu() - Free opp_table RCU handler
 * @head: RCU head
 *
 * NOTE(review): like _kfree_opp_dev_rcu(), this issues kfree_rcu() from
 * within an RCU callback, deferring the actual kfree() by one extra grace
 * period; a direct kfree() would presumably be enough — confirm.
 */
static void _kfree_device_rcu(struct rcu_head *head)
{
	struct opp_table *opp_table = container_of(head, struct opp_table,
						   rcu_head);

	kfree_rcu(opp_table, rcu_head);
}
  751. /**
  752. * _remove_opp_table() - Removes a OPP table
  753. * @opp_table: OPP table to be removed.
  754. *
  755. * Removes/frees OPP table if it doesn't contain any OPPs.
  756. */
  757. static void _remove_opp_table(struct opp_table *opp_table)
  758. {
  759. struct opp_device *opp_dev;
  760. if (!list_empty(&opp_table->opp_list))
  761. return;
  762. if (opp_table->supported_hw)
  763. return;
  764. if (opp_table->prop_name)
  765. return;
  766. if (opp_table->regulators)
  767. return;
  768. if (opp_table->set_opp)
  769. return;
  770. /* Release clk */
  771. if (!IS_ERR(opp_table->clk))
  772. clk_put(opp_table->clk);
  773. opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
  774. node);
  775. _remove_opp_dev(opp_dev, opp_table);
  776. /* dev_list must be empty now */
  777. WARN_ON(!list_empty(&opp_table->dev_list));
  778. list_del_rcu(&opp_table->node);
  779. call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
  780. _kfree_device_rcu);
  781. }
/**
 * _kfree_opp_rcu() - Free OPP RCU handler
 * @head: RCU head
 *
 * Runs after the grace period that follows list_del_rcu() of the OPP.
 * NOTE(review): kfree_rcu() here re-queues the same rcu_head for yet
 * another grace period before freeing — confirm a plain kfree() would
 * not do.
 */
static void _kfree_opp_rcu(struct rcu_head *head)
{
	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);

	kfree_rcu(opp, rcu_head);
}
/**
 * _opp_remove() - Remove an OPP from a table definition
 * @opp_table: points back to the opp_table struct this opp belongs to
 * @opp: pointer to the OPP to remove
 * @notify: OPP_EVENT_REMOVE notification should be sent or not
 *
 * This function removes an opp definition from the opp table.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * It is assumed that the caller holds required mutex for an RCU updater
 * strategy.
 */
void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
		 bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_REMOVE, opp);
	opp_debug_remove_one(opp);
	list_del_rcu(&opp->node);
	/* Free the OPP only after an SRCU grace period has elapsed */
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Drop the table itself if this was its last OPP / resource */
	_remove_opp_table(opp_table);
}
  818. /**
  819. * dev_pm_opp_remove() - Remove an OPP from OPP table
  820. * @dev: device for which we do this operation
  821. * @freq: OPP to remove with matching 'freq'
  822. *
  823. * This function removes an opp from the opp table.
  824. *
  825. * Locking: The internal opp_table and opp structures are RCU protected.
  826. * Hence this function internally uses RCU updater strategy with mutex locks
  827. * to keep the integrity of the internal data structures. Callers should ensure
  828. * that this function is *NOT* called under RCU protection or in contexts where
  829. * mutex cannot be locked.
  830. */
  831. void dev_pm_opp_remove(struct device *dev, unsigned long freq)
  832. {
  833. struct dev_pm_opp *opp;
  834. struct opp_table *opp_table;
  835. bool found = false;
  836. /* Hold our table modification lock here */
  837. mutex_lock(&opp_table_lock);
  838. opp_table = _find_opp_table(dev);
  839. if (IS_ERR(opp_table))
  840. goto unlock;
  841. list_for_each_entry(opp, &opp_table->opp_list, node) {
  842. if (opp->rate == freq) {
  843. found = true;
  844. break;
  845. }
  846. }
  847. if (!found) {
  848. dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
  849. __func__, freq);
  850. goto unlock;
  851. }
  852. _opp_remove(opp_table, opp, true);
  853. unlock:
  854. mutex_unlock(&opp_table_lock);
  855. }
  856. EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
  857. struct dev_pm_opp *_allocate_opp(struct device *dev,
  858. struct opp_table **opp_table)
  859. {
  860. struct dev_pm_opp *opp;
  861. int count, supply_size;
  862. struct opp_table *table;
  863. table = _add_opp_table(dev);
  864. if (!table)
  865. return NULL;
  866. /* Allocate space for at least one supply */
  867. count = table->regulator_count ? table->regulator_count : 1;
  868. supply_size = sizeof(*opp->supplies) * count;
  869. /* allocate new OPP node and supplies structures */
  870. opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
  871. if (!opp) {
  872. kfree(table);
  873. return NULL;
  874. }
  875. /* Put the supplies at the end of the OPP structure as an empty array */
  876. opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
  877. INIT_LIST_HEAD(&opp->node);
  878. *opp_table = table;
  879. return opp;
  880. }
  881. static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
  882. struct opp_table *opp_table)
  883. {
  884. struct regulator *reg;
  885. int i;
  886. for (i = 0; i < opp_table->regulator_count; i++) {
  887. reg = opp_table->regulators[i];
  888. if (!regulator_is_supported_voltage(reg,
  889. opp->supplies[i].u_volt_min,
  890. opp->supplies[i].u_volt_max)) {
  891. pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
  892. __func__, opp->supplies[i].u_volt_min,
  893. opp->supplies[i].u_volt_max);
  894. return false;
  895. }
  896. }
  897. return true;
  898. }
/*
 * _opp_add() - Insert an allocated OPP into a table's sorted list
 * @dev: device the OPP table belongs to (used for diagnostics only)
 * @new_opp: fully populated OPP to insert
 * @opp_table: table to insert into
 *
 * Keeps opp_list sorted by ascending frequency, registers the OPP in
 * debugfs and marks it unavailable when the regulators cannot deliver
 * its voltage range.
 *
 * Return: 0 on success — including the benign duplicate case (same freq,
 * same volt on supply 0, existing OPP available); -EEXIST for a
 * conflicting duplicate. Caller must hold the table-modification lock.
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &opp_table->opp_list;
	int ret;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			head = &opp->node;
			continue;
		}
		if (new_opp->rate < opp->rate)
			break;
		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->supplies[0].u_volt,
			 opp->available, new_opp->rate,
			 new_opp->supplies[0].u_volt, new_opp->available);
		/* Should we compare voltages for all regulators here ? */
		return opp->available &&
		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? 0 : -EEXIST;
	}

	new_opp->opp_table = opp_table;
	list_add_rcu(&new_opp->node, head);

	/* A debugfs registration failure is reported but not fatal */
	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	/* Keep the OPP on the list but disabled if regulators can't do it */
	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}
/**
 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 * @dynamic: Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
		bool dynamic)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp;
	unsigned long tol;
	int ret;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	new_opp = _allocate_opp(dev, &opp_table);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* populate the opp table */
	new_opp->rate = freq;
	/* voltage_tolerance_v1 is a percentage; derive the min/max window */
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret)
		goto free_opp;

	mutex_unlock(&opp_table_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 * (Deliberately after dropping the lock; notifiers may block.)
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	/* No REMOVE notification: the ADD never became visible */
	_opp_remove(opp_table, new_opp, false);
unlock:
	mutex_unlock(&opp_table_lock);
	return ret;
}
  1007. /**
  1008. * dev_pm_opp_set_supported_hw() - Set supported platforms
  1009. * @dev: Device for which supported-hw has to be set.
  1010. * @versions: Array of hierarchy of versions to match.
  1011. * @count: Number of elements in the array.
  1012. *
  1013. * This is required only for the V2 bindings, and it enables a platform to
  1014. * specify the hierarchy of versions it supports. OPP layer will then enable
  1015. * OPPs, which are available for those versions, based on its 'opp-supported-hw'
  1016. * property.
  1017. *
  1018. * Locking: The internal opp_table and opp structures are RCU protected.
  1019. * Hence this function internally uses RCU updater strategy with mutex locks
  1020. * to keep the integrity of the internal data structures. Callers should ensure
  1021. * that this function is *NOT* called under RCU protection or in contexts where
  1022. * mutex cannot be locked.
  1023. */
  1024. int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
  1025. unsigned int count)
  1026. {
  1027. struct opp_table *opp_table;
  1028. int ret = 0;
  1029. /* Hold our table modification lock here */
  1030. mutex_lock(&opp_table_lock);
  1031. opp_table = _add_opp_table(dev);
  1032. if (!opp_table) {
  1033. ret = -ENOMEM;
  1034. goto unlock;
  1035. }
  1036. /* Make sure there are no concurrent readers while updating opp_table */
  1037. WARN_ON(!list_empty(&opp_table->opp_list));
  1038. /* Do we already have a version hierarchy associated with opp_table? */
  1039. if (opp_table->supported_hw) {
  1040. dev_err(dev, "%s: Already have supported hardware list\n",
  1041. __func__);
  1042. ret = -EBUSY;
  1043. goto err;
  1044. }
  1045. opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
  1046. GFP_KERNEL);
  1047. if (!opp_table->supported_hw) {
  1048. ret = -ENOMEM;
  1049. goto err;
  1050. }
  1051. opp_table->supported_hw_count = count;
  1052. mutex_unlock(&opp_table_lock);
  1053. return 0;
  1054. err:
  1055. _remove_opp_table(opp_table);
  1056. unlock:
  1057. mutex_unlock(&opp_table_lock);
  1058. return ret;
  1059. }
  1060. EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
  1061. /**
  1062. * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
  1063. * @dev: Device for which supported-hw has to be put.
  1064. *
  1065. * This is required only for the V2 bindings, and is called for a matching
  1066. * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
  1067. * will not be freed.
  1068. *
  1069. * Locking: The internal opp_table and opp structures are RCU protected.
  1070. * Hence this function internally uses RCU updater strategy with mutex locks
  1071. * to keep the integrity of the internal data structures. Callers should ensure
  1072. * that this function is *NOT* called under RCU protection or in contexts where
  1073. * mutex cannot be locked.
  1074. */
  1075. void dev_pm_opp_put_supported_hw(struct device *dev)
  1076. {
  1077. struct opp_table *opp_table;
  1078. /* Hold our table modification lock here */
  1079. mutex_lock(&opp_table_lock);
  1080. /* Check for existing table for 'dev' first */
  1081. opp_table = _find_opp_table(dev);
  1082. if (IS_ERR(opp_table)) {
  1083. dev_err(dev, "Failed to find opp_table: %ld\n",
  1084. PTR_ERR(opp_table));
  1085. goto unlock;
  1086. }
  1087. /* Make sure there are no concurrent readers while updating opp_table */
  1088. WARN_ON(!list_empty(&opp_table->opp_list));
  1089. if (!opp_table->supported_hw) {
  1090. dev_err(dev, "%s: Doesn't have supported hardware list\n",
  1091. __func__);
  1092. goto unlock;
  1093. }
  1094. kfree(opp_table->supported_hw);
  1095. opp_table->supported_hw = NULL;
  1096. opp_table->supported_hw_count = 0;
  1097. /* Try freeing opp_table if this was the last blocking resource */
  1098. _remove_opp_table(opp_table);
  1099. unlock:
  1100. mutex_unlock(&opp_table_lock);
  1101. }
  1102. EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
  1103. /**
  1104. * dev_pm_opp_set_prop_name() - Set prop-extn name
  1105. * @dev: Device for which the prop-name has to be set.
  1106. * @name: name to postfix to properties.
  1107. *
  1108. * This is required only for the V2 bindings, and it enables a platform to
  1109. * specify the extn to be used for certain property names. The properties to
  1110. * which the extension will apply are opp-microvolt and opp-microamp. OPP core
  1111. * should postfix the property name with -<name> while looking for them.
  1112. *
  1113. * Locking: The internal opp_table and opp structures are RCU protected.
  1114. * Hence this function internally uses RCU updater strategy with mutex locks
  1115. * to keep the integrity of the internal data structures. Callers should ensure
  1116. * that this function is *NOT* called under RCU protection or in contexts where
  1117. * mutex cannot be locked.
  1118. */
  1119. int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
  1120. {
  1121. struct opp_table *opp_table;
  1122. int ret = 0;
  1123. /* Hold our table modification lock here */
  1124. mutex_lock(&opp_table_lock);
  1125. opp_table = _add_opp_table(dev);
  1126. if (!opp_table) {
  1127. ret = -ENOMEM;
  1128. goto unlock;
  1129. }
  1130. /* Make sure there are no concurrent readers while updating opp_table */
  1131. WARN_ON(!list_empty(&opp_table->opp_list));
  1132. /* Do we already have a prop-name associated with opp_table? */
  1133. if (opp_table->prop_name) {
  1134. dev_err(dev, "%s: Already have prop-name %s\n", __func__,
  1135. opp_table->prop_name);
  1136. ret = -EBUSY;
  1137. goto err;
  1138. }
  1139. opp_table->prop_name = kstrdup(name, GFP_KERNEL);
  1140. if (!opp_table->prop_name) {
  1141. ret = -ENOMEM;
  1142. goto err;
  1143. }
  1144. mutex_unlock(&opp_table_lock);
  1145. return 0;
  1146. err:
  1147. _remove_opp_table(opp_table);
  1148. unlock:
  1149. mutex_unlock(&opp_table_lock);
  1150. return ret;
  1151. }
  1152. EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
  1153. /**
  1154. * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
  1155. * @dev: Device for which the prop-name has to be put.
  1156. *
  1157. * This is required only for the V2 bindings, and is called for a matching
  1158. * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
  1159. * will not be freed.
  1160. *
  1161. * Locking: The internal opp_table and opp structures are RCU protected.
  1162. * Hence this function internally uses RCU updater strategy with mutex locks
  1163. * to keep the integrity of the internal data structures. Callers should ensure
  1164. * that this function is *NOT* called under RCU protection or in contexts where
  1165. * mutex cannot be locked.
  1166. */
  1167. void dev_pm_opp_put_prop_name(struct device *dev)
  1168. {
  1169. struct opp_table *opp_table;
  1170. /* Hold our table modification lock here */
  1171. mutex_lock(&opp_table_lock);
  1172. /* Check for existing table for 'dev' first */
  1173. opp_table = _find_opp_table(dev);
  1174. if (IS_ERR(opp_table)) {
  1175. dev_err(dev, "Failed to find opp_table: %ld\n",
  1176. PTR_ERR(opp_table));
  1177. goto unlock;
  1178. }
  1179. /* Make sure there are no concurrent readers while updating opp_table */
  1180. WARN_ON(!list_empty(&opp_table->opp_list));
  1181. if (!opp_table->prop_name) {
  1182. dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
  1183. goto unlock;
  1184. }
  1185. kfree(opp_table->prop_name);
  1186. opp_table->prop_name = NULL;
  1187. /* Try freeing opp_table if this was the last blocking resource */
  1188. _remove_opp_table(opp_table);
  1189. unlock:
  1190. mutex_unlock(&opp_table_lock);
  1191. }
  1192. EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
  1193. static int _allocate_set_opp_data(struct opp_table *opp_table)
  1194. {
  1195. struct dev_pm_set_opp_data *data;
  1196. int len, count = opp_table->regulator_count;
  1197. if (WARN_ON(!count))
  1198. return -EINVAL;
  1199. /* space for set_opp_data */
  1200. len = sizeof(*data);
  1201. /* space for old_opp.supplies and new_opp.supplies */
  1202. len += 2 * sizeof(struct dev_pm_opp_supply) * count;
  1203. data = kzalloc(len, GFP_KERNEL);
  1204. if (!data)
  1205. return -ENOMEM;
  1206. data->old_opp.supplies = (void *)(data + 1);
  1207. data->new_opp.supplies = data->old_opp.supplies + count;
  1208. opp_table->set_opp_data = data;
  1209. return 0;
  1210. }
/* Free the block allocated by _allocate_set_opp_data() and clear the ref. */
static void _free_set_opp_data(struct opp_table *opp_table)
{
	kfree(opp_table->set_opp_data);
	opp_table->set_opp_data = NULL;
}
/**
 * dev_pm_opp_set_regulators() - Set regulator names for the device
 * @dev: Device for which regulator name is being set.
 * @names: Array of pointers to the names of the regulator.
 * @count: Number of regulators.
 *
 * In order to support OPP switching, OPP layer needs to know the name of the
 * device's regulators, as the core would be required to switch voltages as
 * well.
 *
 * This must be called before any OPPs are initialized for the device.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
					    const char * const names[],
					    unsigned int count)
{
	struct opp_table *opp_table;
	struct regulator *reg;
	int ret, i;

	mutex_lock(&opp_table_lock);

	opp_table = _add_opp_table(dev);
	if (!opp_table) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Already have regulators set */
	if (opp_table->regulators) {
		ret = -EBUSY;
		goto err;
	}

	opp_table->regulators = kmalloc_array(count,
					      sizeof(*opp_table->regulators),
					      GFP_KERNEL);
	if (!opp_table->regulators) {
		ret = -ENOMEM;
		goto err;
	}

	/* Acquire each named regulator; unwind all on the first failure */
	for (i = 0; i < count; i++) {
		reg = regulator_get_optional(dev, names[i]);
		if (IS_ERR(reg)) {
			ret = PTR_ERR(reg);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "%s: no regulator (%s) found: %d\n",
					__func__, names[i], ret);
			goto free_regulators;
		}

		opp_table->regulators[i] = reg;
	}

	opp_table->regulator_count = count;

	/* Allocate block only once to pass to set_opp() routines */
	ret = _allocate_set_opp_data(opp_table);
	if (ret)
		goto free_regulators;

	mutex_unlock(&opp_table_lock);
	return opp_table;

free_regulators:
	/* 'i' is the number of regulators successfully acquired so far */
	while (i != 0)
		regulator_put(opp_table->regulators[--i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = 0;
err:
	/* Drop the table again if we were the only reason it existed */
	_remove_opp_table(opp_table);
unlock:
	mutex_unlock(&opp_table_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
  1295. /**
  1296. * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
  1297. * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
  1298. *
  1299. * Locking: The internal opp_table and opp structures are RCU protected.
  1300. * Hence this function internally uses RCU updater strategy with mutex locks
  1301. * to keep the integrity of the internal data structures. Callers should ensure
  1302. * that this function is *NOT* called under RCU protection or in contexts where
  1303. * mutex cannot be locked.
  1304. */
  1305. void dev_pm_opp_put_regulators(struct opp_table *opp_table)
  1306. {
  1307. int i;
  1308. mutex_lock(&opp_table_lock);
  1309. if (!opp_table->regulators) {
  1310. pr_err("%s: Doesn't have regulators set\n", __func__);
  1311. goto unlock;
  1312. }
  1313. /* Make sure there are no concurrent readers while updating opp_table */
  1314. WARN_ON(!list_empty(&opp_table->opp_list));
  1315. for (i = opp_table->regulator_count - 1; i >= 0; i--)
  1316. regulator_put(opp_table->regulators[i]);
  1317. _free_set_opp_data(opp_table);
  1318. kfree(opp_table->regulators);
  1319. opp_table->regulators = NULL;
  1320. opp_table->regulator_count = 0;
  1321. /* Try freeing opp_table if this was the last blocking resource */
  1322. _remove_opp_table(opp_table);
  1323. unlock:
  1324. mutex_unlock(&opp_table_lock);
  1325. }
  1326. EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
  1327. /**
  1328. * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
  1329. * @dev: Device for which the helper is getting registered.
  1330. * @set_opp: Custom set OPP helper.
  1331. *
  1332. * This is useful to support complex platforms (like platforms with multiple
  1333. * regulators per device), instead of the generic OPP set rate helper.
  1334. *
  1335. * This must be called before any OPPs are initialized for the device.
  1336. *
  1337. * Locking: The internal opp_table and opp structures are RCU protected.
  1338. * Hence this function internally uses RCU updater strategy with mutex locks
  1339. * to keep the integrity of the internal data structures. Callers should ensure
  1340. * that this function is *NOT* called under RCU protection or in contexts where
  1341. * mutex cannot be locked.
  1342. */
  1343. int dev_pm_opp_register_set_opp_helper(struct device *dev,
  1344. int (*set_opp)(struct dev_pm_set_opp_data *data))
  1345. {
  1346. struct opp_table *opp_table;
  1347. int ret;
  1348. if (!set_opp)
  1349. return -EINVAL;
  1350. mutex_lock(&opp_table_lock);
  1351. opp_table = _add_opp_table(dev);
  1352. if (!opp_table) {
  1353. ret = -ENOMEM;
  1354. goto unlock;
  1355. }
  1356. /* This should be called before OPPs are initialized */
  1357. if (WARN_ON(!list_empty(&opp_table->opp_list))) {
  1358. ret = -EBUSY;
  1359. goto err;
  1360. }
  1361. /* Already have custom set_opp helper */
  1362. if (WARN_ON(opp_table->set_opp)) {
  1363. ret = -EBUSY;
  1364. goto err;
  1365. }
  1366. opp_table->set_opp = set_opp;
  1367. mutex_unlock(&opp_table_lock);
  1368. return 0;
  1369. err:
  1370. _remove_opp_table(opp_table);
  1371. unlock:
  1372. mutex_unlock(&opp_table_lock);
  1373. return ret;
  1374. }
  1375. EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
  1376. /**
  1377. * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for
  1378. * set_opp helper
  1379. * @dev: Device for which custom set_opp helper has to be cleared.
  1380. *
  1381. * Locking: The internal opp_table and opp structures are RCU protected.
  1382. * Hence this function internally uses RCU updater strategy with mutex locks
  1383. * to keep the integrity of the internal data structures. Callers should ensure
  1384. * that this function is *NOT* called under RCU protection or in contexts where
  1385. * mutex cannot be locked.
  1386. */
  1387. void dev_pm_opp_register_put_opp_helper(struct device *dev)
  1388. {
  1389. struct opp_table *opp_table;
  1390. mutex_lock(&opp_table_lock);
  1391. /* Check for existing table for 'dev' first */
  1392. opp_table = _find_opp_table(dev);
  1393. if (IS_ERR(opp_table)) {
  1394. dev_err(dev, "Failed to find opp_table: %ld\n",
  1395. PTR_ERR(opp_table));
  1396. goto unlock;
  1397. }
  1398. if (!opp_table->set_opp) {
  1399. dev_err(dev, "%s: Doesn't have custom set_opp helper set\n",
  1400. __func__);
  1401. goto unlock;
  1402. }
  1403. /* Make sure there are no concurrent readers while updating opp_table */
  1404. WARN_ON(!list_empty(&opp_table->opp_list));
  1405. opp_table->set_opp = NULL;
  1406. /* Try freeing opp_table if this was the last blocking resource */
  1407. _remove_opp_table(opp_table);
  1408. unlock:
  1409. mutex_unlock(&opp_table_lock);
  1410. }
  1411. EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);
/**
 * dev_pm_opp_add() - Add an OPP table from a table definitions
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	/* dynamic=true: the OPP is removable via dev_pm_opp_remove() */
	return _opp_add_v1(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct opp_table *opp_table;
	/* opp stays an ERR_PTR sentinel until a matching freq is found */
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/*
	 * keep the node allocated
	 * (allocated outside the lock so the locked section can't fail
	 * on -ENOMEM mid-update)
	 */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&opp_table_lock);

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;

	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	/*
	 * RCU-style update: atomically swap the old node for the copy,
	 * then free the old one only after a grace period.
	 */
	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&opp_table_lock);
	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_ENABLE, new_opp);
	else
		srcu_notifier_call_chain(&opp_table->srcu_head,
					 OPP_EVENT_DISABLE, new_opp);

	return 0;

unlock:
	mutex_unlock(&opp_table_lock);
	kfree(new_opp);
	return r;
}
/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used to make an OPP available
 * again after it was temporarily made unavailable with dev_pm_opp_disable.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
  1560. /**
  1561. * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
  1562. * @dev: device pointer used to lookup OPP table.
  1563. *
  1564. * Return: pointer to notifier head if found, otherwise -ENODEV or
  1565. * -EINVAL based on type of error casted as pointer. value must be checked
  1566. * with IS_ERR to determine valid pointer or error result.
  1567. *
  1568. * Locking: This function must be called under rcu_read_lock(). opp_table is a
  1569. * RCU protected pointer. The reason for the same is that the opp pointer which
  1570. * is returned will remain valid for use with opp_get_{voltage, freq} only while
  1571. * under the locked area. The pointer returned must be used prior to unlocking
  1572. * with rcu_read_unlock() to maintain the integrity of the pointer.
  1573. */
  1574. struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
  1575. {
  1576. struct opp_table *opp_table = _find_opp_table(dev);
  1577. if (IS_ERR(opp_table))
  1578. return ERR_CAST(opp_table); /* matching type */
  1579. return &opp_table->srcu_head;
  1580. }
  1581. EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
  1582. /*
  1583. * Free OPPs either created using static entries present in DT or even the
  1584. * dynamically added entries based on remove_all param.
  1585. */
  1586. void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
  1587. {
  1588. struct opp_table *opp_table;
  1589. struct dev_pm_opp *opp, *tmp;
  1590. /* Hold our table modification lock here */
  1591. mutex_lock(&opp_table_lock);
  1592. /* Check for existing table for 'dev' */
  1593. opp_table = _find_opp_table(dev);
  1594. if (IS_ERR(opp_table)) {
  1595. int error = PTR_ERR(opp_table);
  1596. if (error != -ENODEV)
  1597. WARN(1, "%s: opp_table: %d\n",
  1598. IS_ERR_OR_NULL(dev) ?
  1599. "Invalid device" : dev_name(dev),
  1600. error);
  1601. goto unlock;
  1602. }
  1603. /* Find if opp_table manages a single device */
  1604. if (list_is_singular(&opp_table->dev_list)) {
  1605. /* Free static OPPs */
  1606. list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
  1607. if (remove_all || !opp->dynamic)
  1608. _opp_remove(opp_table, opp, true);
  1609. }
  1610. } else {
  1611. _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
  1612. }
  1613. unlock:
  1614. mutex_unlock(&opp_table_lock);
  1615. }
/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev: device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	/* remove_all = true: dynamic entries are freed along with static ones */
	_dev_pm_opp_remove_table(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);