/* core.c */
  1. /*
  2. * Generic OPP Interface
  3. *
  4. * Copyright (C) 2009-2010 Texas Instruments Incorporated.
  5. * Nishanth Menon
  6. * Romit Dasgupta
  7. * Kevin Hilman
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/clk.h>
  15. #include <linux/errno.h>
  16. #include <linux/err.h>
  17. #include <linux/slab.h>
  18. #include <linux/device.h>
  19. #include <linux/export.h>
  20. #include <linux/regulator/consumer.h>
  21. #include "opp.h"
/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);

/*
 * Complain (under lockdep) unless the caller holds either the RCU read lock
 * (readers) or opp_table_lock (updaters) -- every accessor of the lists
 * above must satisfy one of the two.
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&opp_table_lock),		\
			 "Missing rcu_read_lock() or "			\
			 "opp_table_lock protection");			\
} while (0)
  37. static struct opp_device *_find_opp_dev(const struct device *dev,
  38. struct opp_table *opp_table)
  39. {
  40. struct opp_device *opp_dev;
  41. list_for_each_entry(opp_dev, &opp_table->dev_list, node)
  42. if (opp_dev->dev == dev)
  43. return opp_dev;
  44. return NULL;
  45. }
  46. /**
  47. * _find_opp_table() - find opp_table struct using device pointer
  48. * @dev: device pointer used to lookup OPP table
  49. *
  50. * Search OPP table for one containing matching device. Does a RCU reader
  51. * operation to grab the pointer needed.
  52. *
  53. * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
  54. * -EINVAL based on type of error.
  55. *
  56. * Locking: For readers, this function must be called under rcu_read_lock().
  57. * opp_table is a RCU protected pointer, which means that opp_table is valid
  58. * as long as we are under RCU lock.
  59. *
  60. * For Writers, this function must be called with opp_table_lock held.
  61. */
  62. struct opp_table *_find_opp_table(struct device *dev)
  63. {
  64. struct opp_table *opp_table;
  65. opp_rcu_lockdep_assert();
  66. if (IS_ERR_OR_NULL(dev)) {
  67. pr_err("%s: Invalid parameters\n", __func__);
  68. return ERR_PTR(-EINVAL);
  69. }
  70. list_for_each_entry_rcu(opp_table, &opp_tables, node)
  71. if (_find_opp_dev(dev, opp_table))
  72. return opp_table;
  73. return ERR_PTR(-ENODEV);
  74. }
  75. /**
  76. * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
  77. * @opp: opp for which voltage has to be returned for
  78. *
  79. * Return: voltage in micro volt corresponding to the opp, else
  80. * return 0
  81. *
  82. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  83. * protected pointer. This means that opp which could have been fetched by
  84. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  85. * under RCU lock. The pointer returned by the opp_find_freq family must be
  86. * used in the same section as the usage of this function with the pointer
  87. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  88. * pointer.
  89. */
  90. unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
  91. {
  92. struct dev_pm_opp *tmp_opp;
  93. unsigned long v = 0;
  94. opp_rcu_lockdep_assert();
  95. tmp_opp = rcu_dereference(opp);
  96. if (IS_ERR_OR_NULL(tmp_opp))
  97. pr_err("%s: Invalid parameters\n", __func__);
  98. else
  99. v = tmp_opp->u_volt;
  100. return v;
  101. }
  102. EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
  103. /**
  104. * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  105. * @opp: opp for which frequency has to be returned for
  106. *
  107. * Return: frequency in hertz corresponding to the opp, else
  108. * return 0
  109. *
  110. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  111. * protected pointer. This means that opp which could have been fetched by
  112. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  113. * under RCU lock. The pointer returned by the opp_find_freq family must be
  114. * used in the same section as the usage of this function with the pointer
  115. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  116. * pointer.
  117. */
  118. unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
  119. {
  120. struct dev_pm_opp *tmp_opp;
  121. unsigned long f = 0;
  122. opp_rcu_lockdep_assert();
  123. tmp_opp = rcu_dereference(opp);
  124. if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
  125. pr_err("%s: Invalid parameters\n", __func__);
  126. else
  127. f = tmp_opp->rate;
  128. return f;
  129. }
  130. EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
  131. /**
  132. * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
  133. * @opp: opp for which turbo mode is being verified
  134. *
  135. * Turbo OPPs are not for normal use, and can be enabled (under certain
  136. * conditions) for short duration of times to finish high throughput work
  137. * quickly. Running on them for longer times may overheat the chip.
  138. *
  139. * Return: true if opp is turbo opp, else false.
  140. *
  141. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  142. * protected pointer. This means that opp which could have been fetched by
  143. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  144. * under RCU lock. The pointer returned by the opp_find_freq family must be
  145. * used in the same section as the usage of this function with the pointer
  146. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  147. * pointer.
  148. */
  149. bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
  150. {
  151. struct dev_pm_opp *tmp_opp;
  152. opp_rcu_lockdep_assert();
  153. tmp_opp = rcu_dereference(opp);
  154. if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
  155. pr_err("%s: Invalid parameters\n", __func__);
  156. return false;
  157. }
  158. return tmp_opp->turbo;
  159. }
  160. EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
  161. /**
  162. * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
  163. * @dev: device for which we do this operation
  164. *
  165. * Return: This function returns the max clock latency in nanoseconds.
  166. *
  167. * Locking: This function takes rcu_read_lock().
  168. */
  169. unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
  170. {
  171. struct opp_table *opp_table;
  172. unsigned long clock_latency_ns;
  173. rcu_read_lock();
  174. opp_table = _find_opp_table(dev);
  175. if (IS_ERR(opp_table))
  176. clock_latency_ns = 0;
  177. else
  178. clock_latency_ns = opp_table->clock_latency_ns_max;
  179. rcu_read_unlock();
  180. return clock_latency_ns;
  181. }
  182. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
  183. /**
  184. * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
  185. * @dev: device for which we do this operation
  186. *
  187. * Return: This function returns the max voltage latency in nanoseconds.
  188. *
  189. * Locking: This function takes rcu_read_lock().
  190. */
  191. unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
  192. {
  193. struct opp_table *opp_table;
  194. struct dev_pm_opp *opp;
  195. struct regulator *reg;
  196. unsigned long latency_ns = 0;
  197. unsigned long min_uV = ~0, max_uV = 0;
  198. int ret;
  199. rcu_read_lock();
  200. opp_table = _find_opp_table(dev);
  201. if (IS_ERR(opp_table)) {
  202. rcu_read_unlock();
  203. return 0;
  204. }
  205. reg = opp_table->regulator;
  206. if (IS_ERR(reg)) {
  207. /* Regulator may not be required for device */
  208. rcu_read_unlock();
  209. return 0;
  210. }
  211. list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
  212. if (!opp->available)
  213. continue;
  214. if (opp->u_volt_min < min_uV)
  215. min_uV = opp->u_volt_min;
  216. if (opp->u_volt_max > max_uV)
  217. max_uV = opp->u_volt_max;
  218. }
  219. rcu_read_unlock();
  220. /*
  221. * The caller needs to ensure that opp_table (and hence the regulator)
  222. * isn't freed, while we are executing this routine.
  223. */
  224. ret = regulator_set_voltage_time(reg, min_uV, max_uV);
  225. if (ret > 0)
  226. latency_ns = ret * 1000;
  227. return latency_ns;
  228. }
  229. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
  230. /**
  231. * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
  232. * nanoseconds
  233. * @dev: device for which we do this operation
  234. *
  235. * Return: This function returns the max transition latency, in nanoseconds, to
  236. * switch from one OPP to other.
  237. *
  238. * Locking: This function takes rcu_read_lock().
  239. */
  240. unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
  241. {
  242. return dev_pm_opp_get_max_volt_latency(dev) +
  243. dev_pm_opp_get_max_clock_latency(dev);
  244. }
  245. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
  246. /**
  247. * dev_pm_opp_get_suspend_opp() - Get suspend opp
  248. * @dev: device for which we do this operation
  249. *
  250. * Return: This function returns pointer to the suspend opp if it is
  251. * defined and available, otherwise it returns NULL.
  252. *
  253. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  254. * protected pointer. The reason for the same is that the opp pointer which is
  255. * returned will remain valid for use with opp_get_{voltage, freq} only while
  256. * under the locked area. The pointer returned must be used prior to unlocking
  257. * with rcu_read_unlock() to maintain the integrity of the pointer.
  258. */
  259. struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
  260. {
  261. struct opp_table *opp_table;
  262. opp_rcu_lockdep_assert();
  263. opp_table = _find_opp_table(dev);
  264. if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
  265. !opp_table->suspend_opp->available)
  266. return NULL;
  267. return opp_table->suspend_opp;
  268. }
  269. EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
  270. /**
  271. * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
  272. * @dev: device for which we do this operation
  273. *
  274. * Return: This function returns the number of available opps if there are any,
  275. * else returns 0 if none or the corresponding error value.
  276. *
  277. * Locking: This function takes rcu_read_lock().
  278. */
  279. int dev_pm_opp_get_opp_count(struct device *dev)
  280. {
  281. struct opp_table *opp_table;
  282. struct dev_pm_opp *temp_opp;
  283. int count = 0;
  284. rcu_read_lock();
  285. opp_table = _find_opp_table(dev);
  286. if (IS_ERR(opp_table)) {
  287. count = PTR_ERR(opp_table);
  288. dev_err(dev, "%s: OPP table not found (%d)\n",
  289. __func__, count);
  290. goto out_unlock;
  291. }
  292. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  293. if (temp_opp->available)
  294. count++;
  295. }
  296. out_unlock:
  297. rcu_read_unlock();
  298. return count;
  299. }
  300. EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
  301. /**
  302. * dev_pm_opp_find_freq_exact() - search for an exact frequency
  303. * @dev: device for which we do this operation
  304. * @freq: frequency to search for
  305. * @available: true/false - match for available opp
  306. *
  307. * Return: Searches for exact match in the opp table and returns pointer to the
  308. * matching opp if found, else returns ERR_PTR in case of error and should
  309. * be handled using IS_ERR. Error return values can be:
  310. * EINVAL: for bad pointer
  311. * ERANGE: no match found for search
  312. * ENODEV: if device not found in list of registered devices
  313. *
  314. * Note: available is a modifier for the search. if available=true, then the
  315. * match is for exact matching frequency and is available in the stored OPP
  316. * table. if false, the match is for exact frequency which is not available.
  317. *
  318. * This provides a mechanism to enable an opp which is not available currently
  319. * or the opposite as well.
  320. *
  321. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  322. * protected pointer. The reason for the same is that the opp pointer which is
  323. * returned will remain valid for use with opp_get_{voltage, freq} only while
  324. * under the locked area. The pointer returned must be used prior to unlocking
  325. * with rcu_read_unlock() to maintain the integrity of the pointer.
  326. */
  327. struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
  328. unsigned long freq,
  329. bool available)
  330. {
  331. struct opp_table *opp_table;
  332. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  333. opp_rcu_lockdep_assert();
  334. opp_table = _find_opp_table(dev);
  335. if (IS_ERR(opp_table)) {
  336. int r = PTR_ERR(opp_table);
  337. dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
  338. return ERR_PTR(r);
  339. }
  340. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  341. if (temp_opp->available == available &&
  342. temp_opp->rate == freq) {
  343. opp = temp_opp;
  344. break;
  345. }
  346. }
  347. return opp;
  348. }
  349. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
  350. static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
  351. unsigned long *freq)
  352. {
  353. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  354. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  355. if (temp_opp->available && temp_opp->rate >= *freq) {
  356. opp = temp_opp;
  357. *freq = opp->rate;
  358. break;
  359. }
  360. }
  361. return opp;
  362. }
  363. /**
  364. * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
  365. * @dev: device for which we do this operation
  366. * @freq: Start frequency
  367. *
  368. * Search for the matching ceil *available* OPP from a starting freq
  369. * for a device.
  370. *
  371. * Return: matching *opp and refreshes *freq accordingly, else returns
  372. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  373. * values can be:
  374. * EINVAL: for bad pointer
  375. * ERANGE: no match found for search
  376. * ENODEV: if device not found in list of registered devices
  377. *
  378. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  379. * protected pointer. The reason for the same is that the opp pointer which is
  380. * returned will remain valid for use with opp_get_{voltage, freq} only while
  381. * under the locked area. The pointer returned must be used prior to unlocking
  382. * with rcu_read_unlock() to maintain the integrity of the pointer.
  383. */
  384. struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
  385. unsigned long *freq)
  386. {
  387. struct opp_table *opp_table;
  388. opp_rcu_lockdep_assert();
  389. if (!dev || !freq) {
  390. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  391. return ERR_PTR(-EINVAL);
  392. }
  393. opp_table = _find_opp_table(dev);
  394. if (IS_ERR(opp_table))
  395. return ERR_CAST(opp_table);
  396. return _find_freq_ceil(opp_table, freq);
  397. }
  398. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
  399. /**
  400. * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
  401. * @dev: device for which we do this operation
  402. * @freq: Start frequency
  403. *
  404. * Search for the matching floor *available* OPP from a starting freq
  405. * for a device.
  406. *
  407. * Return: matching *opp and refreshes *freq accordingly, else returns
  408. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  409. * values can be:
  410. * EINVAL: for bad pointer
  411. * ERANGE: no match found for search
  412. * ENODEV: if device not found in list of registered devices
  413. *
  414. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  415. * protected pointer. The reason for the same is that the opp pointer which is
  416. * returned will remain valid for use with opp_get_{voltage, freq} only while
  417. * under the locked area. The pointer returned must be used prior to unlocking
  418. * with rcu_read_unlock() to maintain the integrity of the pointer.
  419. */
  420. struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
  421. unsigned long *freq)
  422. {
  423. struct opp_table *opp_table;
  424. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  425. opp_rcu_lockdep_assert();
  426. if (!dev || !freq) {
  427. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  428. return ERR_PTR(-EINVAL);
  429. }
  430. opp_table = _find_opp_table(dev);
  431. if (IS_ERR(opp_table))
  432. return ERR_CAST(opp_table);
  433. list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
  434. if (temp_opp->available) {
  435. /* go to the next node, before choosing prev */
  436. if (temp_opp->rate > *freq)
  437. break;
  438. else
  439. opp = temp_opp;
  440. }
  441. }
  442. if (!IS_ERR(opp))
  443. *freq = opp->rate;
  444. return opp;
  445. }
  446. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
  447. /*
  448. * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
  449. * while clk returned here is used.
  450. */
  451. static struct clk *_get_opp_clk(struct device *dev)
  452. {
  453. struct opp_table *opp_table;
  454. struct clk *clk;
  455. rcu_read_lock();
  456. opp_table = _find_opp_table(dev);
  457. if (IS_ERR(opp_table)) {
  458. dev_err(dev, "%s: device opp doesn't exist\n", __func__);
  459. clk = ERR_CAST(opp_table);
  460. goto unlock;
  461. }
  462. clk = opp_table->clk;
  463. if (IS_ERR(clk))
  464. dev_err(dev, "%s: No clock available for the device\n",
  465. __func__);
  466. unlock:
  467. rcu_read_unlock();
  468. return clk;
  469. }
  470. static int _set_opp_voltage(struct device *dev, struct regulator *reg,
  471. unsigned long u_volt, unsigned long u_volt_min,
  472. unsigned long u_volt_max)
  473. {
  474. int ret;
  475. /* Regulator not available for device */
  476. if (IS_ERR(reg)) {
  477. dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
  478. PTR_ERR(reg));
  479. return 0;
  480. }
  481. dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
  482. u_volt, u_volt_max);
  483. ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
  484. u_volt_max);
  485. if (ret)
  486. dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
  487. __func__, u_volt_min, u_volt, u_volt_max, ret);
  488. return ret;
  489. }
  490. /**
  491. * dev_pm_opp_set_rate() - Configure new OPP based on frequency
  492. * @dev: device for which we do this operation
  493. * @target_freq: frequency to achieve
  494. *
  495. * This configures the power-supplies and clock source to the levels specified
  496. * by the OPP corresponding to the target_freq.
  497. *
  498. * Locking: This function takes rcu_read_lock().
  499. */
  500. int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
  501. {
  502. struct opp_table *opp_table;
  503. struct dev_pm_opp *old_opp, *opp;
  504. struct regulator *reg;
  505. struct clk *clk;
  506. unsigned long freq, old_freq;
  507. unsigned long u_volt, u_volt_min, u_volt_max;
  508. unsigned long ou_volt, ou_volt_min, ou_volt_max;
  509. int ret;
  510. if (unlikely(!target_freq)) {
  511. dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
  512. target_freq);
  513. return -EINVAL;
  514. }
  515. clk = _get_opp_clk(dev);
  516. if (IS_ERR(clk))
  517. return PTR_ERR(clk);
  518. freq = clk_round_rate(clk, target_freq);
  519. if ((long)freq <= 0)
  520. freq = target_freq;
  521. old_freq = clk_get_rate(clk);
  522. /* Return early if nothing to do */
  523. if (old_freq == freq) {
  524. dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
  525. __func__, freq);
  526. return 0;
  527. }
  528. rcu_read_lock();
  529. opp_table = _find_opp_table(dev);
  530. if (IS_ERR(opp_table)) {
  531. dev_err(dev, "%s: device opp doesn't exist\n", __func__);
  532. rcu_read_unlock();
  533. return PTR_ERR(opp_table);
  534. }
  535. old_opp = _find_freq_ceil(opp_table, &old_freq);
  536. if (!IS_ERR(old_opp)) {
  537. ou_volt = old_opp->u_volt;
  538. ou_volt_min = old_opp->u_volt_min;
  539. ou_volt_max = old_opp->u_volt_max;
  540. } else {
  541. dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
  542. __func__, old_freq, PTR_ERR(old_opp));
  543. }
  544. opp = _find_freq_ceil(opp_table, &freq);
  545. if (IS_ERR(opp)) {
  546. ret = PTR_ERR(opp);
  547. dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
  548. __func__, freq, ret);
  549. rcu_read_unlock();
  550. return ret;
  551. }
  552. u_volt = opp->u_volt;
  553. u_volt_min = opp->u_volt_min;
  554. u_volt_max = opp->u_volt_max;
  555. reg = opp_table->regulator;
  556. rcu_read_unlock();
  557. /* Scaling up? Scale voltage before frequency */
  558. if (freq > old_freq) {
  559. ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
  560. u_volt_max);
  561. if (ret)
  562. goto restore_voltage;
  563. }
  564. /* Change frequency */
  565. dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
  566. __func__, old_freq, freq);
  567. ret = clk_set_rate(clk, freq);
  568. if (ret) {
  569. dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
  570. ret);
  571. goto restore_voltage;
  572. }
  573. /* Scaling down? Scale voltage after frequency */
  574. if (freq < old_freq) {
  575. ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
  576. u_volt_max);
  577. if (ret)
  578. goto restore_freq;
  579. }
  580. return 0;
  581. restore_freq:
  582. if (clk_set_rate(clk, old_freq))
  583. dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
  584. __func__, old_freq);
  585. restore_voltage:
  586. /* This shouldn't harm even if the voltages weren't updated earlier */
  587. if (!IS_ERR(old_opp))
  588. _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
  589. return ret;
  590. }
  591. EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
  592. /* OPP-dev Helpers */
  593. static void _kfree_opp_dev_rcu(struct rcu_head *head)
  594. {
  595. struct opp_device *opp_dev;
  596. opp_dev = container_of(head, struct opp_device, rcu_head);
  597. kfree_rcu(opp_dev, rcu_head);
  598. }
/*
 * Unlink @opp_dev from @opp_table and schedule its free.  The debugfs entry
 * is torn down first, then the node is unlinked; the actual kfree is
 * deferred via SRCU so in-flight readers finish before the memory goes away.
 */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
		  _kfree_opp_dev_rcu);
}
  607. struct opp_device *_add_opp_dev(const struct device *dev,
  608. struct opp_table *opp_table)
  609. {
  610. struct opp_device *opp_dev;
  611. int ret;
  612. opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
  613. if (!opp_dev)
  614. return NULL;
  615. /* Initialize opp-dev */
  616. opp_dev->dev = dev;
  617. list_add_rcu(&opp_dev->node, &opp_table->dev_list);
  618. /* Create debugfs entries for the opp_table */
  619. ret = opp_debug_register(opp_dev, opp_table);
  620. if (ret)
  621. dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
  622. __func__, ret);
  623. return opp_dev;
  624. }
/**
 * _add_opp_table() - Find OPP table or allocate a new one
 * @dev: device for which we do this operation
 *
 * It tries to find an existing table first, if it couldn't find one, it
 * allocates a new OPP table and returns that.
 *
 * Return: valid opp_table pointer if success, else NULL.
 */
static struct opp_table *_add_opp_table(struct device *dev)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/* Check for existing table for 'dev' first */
	opp_table = _find_opp_table(dev);
	if (!IS_ERR(opp_table))
		return opp_table;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device is needed to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return NULL;

	INIT_LIST_HEAD(&opp_table->dev_list);

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		kfree(opp_table);
		return NULL;
	}

	_of_init_opp_table(opp_table, dev);

	/* Set regulator to a non-NULL error value */
	opp_table->regulator = ERR_PTR(-ENXIO);

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);
	if (IS_ERR(opp_table->clk)) {
		ret = PTR_ERR(opp_table->clk);

		/* Probe deferral is expected to be retried; stay quiet then */
		if (ret != -EPROBE_DEFER)
			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
				ret);
	}

	srcu_init_notifier_head(&opp_table->srcu_head);
	INIT_LIST_HEAD(&opp_table->opp_list);

	/*
	 * Secure the device table modification: publish the fully initialized
	 * table to RCU readers only as the final step.
	 */
	list_add_rcu(&opp_table->node, &opp_tables);
	return opp_table;
}
  673. /**
  674. * _kfree_device_rcu() - Free opp_table RCU handler
  675. * @head: RCU head
  676. */
  677. static void _kfree_device_rcu(struct rcu_head *head)
  678. {
  679. struct opp_table *opp_table = container_of(head, struct opp_table,
  680. rcu_head);
  681. kfree_rcu(opp_table, rcu_head);
  682. }
/**
 * _remove_opp_table() - Removes a OPP table
 * @opp_table: OPP table to be removed.
 *
 * Removes/frees OPP table if it doesn't contain any OPPs.
 */
static void _remove_opp_table(struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	/* Keep the table alive while anything still references it */
	if (!list_empty(&opp_table->opp_list))
		return;

	if (opp_table->supported_hw)
		return;

	if (opp_table->prop_name)
		return;

	/* A valid (non-error) regulator pointer means a user still holds it */
	if (!IS_ERR(opp_table->regulator))
		return;

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
				   node);

	_remove_opp_dev(opp_dev, opp_table);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&opp_table->dev_list));

	/* Unpublish from readers first, free after the SRCU grace period */
	list_del_rcu(&opp_table->node);
	call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
		  _kfree_device_rcu);
}
  712. /**
  713. * _kfree_opp_rcu() - Free OPP RCU handler
  714. * @head: RCU head
  715. */
  716. static void _kfree_opp_rcu(struct rcu_head *head)
  717. {
  718. struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
  719. kfree_rcu(opp, rcu_head);
  720. }
  721. /**
  722. * _opp_remove() - Remove an OPP from a table definition
  723. * @opp_table: points back to the opp_table struct this opp belongs to
  724. * @opp: pointer to the OPP to remove
  725. * @notify: OPP_EVENT_REMOVE notification should be sent or not
  726. *
  727. * This function removes an opp definition from the opp table.
  728. *
  729. * Locking: The internal opp_table and opp structures are RCU protected.
  730. * It is assumed that the caller holds required mutex for an RCU updater
  731. * strategy.
  732. */
  733. void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
  734. bool notify)
  735. {
  736. /*
  737. * Notify the changes in the availability of the operable
  738. * frequency/voltage list.
  739. */
  740. if (notify)
  741. srcu_notifier_call_chain(&opp_table->srcu_head,
  742. OPP_EVENT_REMOVE, opp);
  743. opp_debug_remove_one(opp);
  744. list_del_rcu(&opp->node);
  745. call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
  746. _remove_opp_table(opp_table);
  747. }
  748. /**
  749. * dev_pm_opp_remove() - Remove an OPP from OPP table
  750. * @dev: device for which we do this operation
  751. * @freq: OPP to remove with matching 'freq'
  752. *
  753. * This function removes an opp from the opp table.
  754. *
  755. * Locking: The internal opp_table and opp structures are RCU protected.
  756. * Hence this function internally uses RCU updater strategy with mutex locks
  757. * to keep the integrity of the internal data structures. Callers should ensure
  758. * that this function is *NOT* called under RCU protection or in contexts where
  759. * mutex cannot be locked.
  760. */
  761. void dev_pm_opp_remove(struct device *dev, unsigned long freq)
  762. {
  763. struct dev_pm_opp *opp;
  764. struct opp_table *opp_table;
  765. bool found = false;
  766. /* Hold our table modification lock here */
  767. mutex_lock(&opp_table_lock);
  768. opp_table = _find_opp_table(dev);
  769. if (IS_ERR(opp_table))
  770. goto unlock;
  771. list_for_each_entry(opp, &opp_table->opp_list, node) {
  772. if (opp->rate == freq) {
  773. found = true;
  774. break;
  775. }
  776. }
  777. if (!found) {
  778. dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
  779. __func__, freq);
  780. goto unlock;
  781. }
  782. _opp_remove(opp_table, opp, true);
  783. unlock:
  784. mutex_unlock(&opp_table_lock);
  785. }
  786. EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
  787. struct dev_pm_opp *_allocate_opp(struct device *dev,
  788. struct opp_table **opp_table)
  789. {
  790. struct dev_pm_opp *opp;
  791. /* allocate new OPP node */
  792. opp = kzalloc(sizeof(*opp), GFP_KERNEL);
  793. if (!opp)
  794. return NULL;
  795. INIT_LIST_HEAD(&opp->node);
  796. *opp_table = _add_opp_table(dev);
  797. if (!*opp_table) {
  798. kfree(opp);
  799. return NULL;
  800. }
  801. return opp;
  802. }
  803. static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
  804. struct opp_table *opp_table)
  805. {
  806. struct regulator *reg = opp_table->regulator;
  807. if (!IS_ERR(reg) &&
  808. !regulator_is_supported_voltage(reg, opp->u_volt_min,
  809. opp->u_volt_max)) {
  810. pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
  811. __func__, opp->u_volt_min, opp->u_volt_max);
  812. return false;
  813. }
  814. return true;
  815. }
/*
 * _opp_add() - Insert a fully-populated OPP into a table, sorted by rate.
 * @dev: device, used only for diagnostics
 * @new_opp: OPP to insert (rate/voltages already filled in)
 * @opp_table: table to insert into
 *
 * Return: 0 on success, or when the duplicate already present is enabled
 * with the same voltage; -EEXIST for conflicting duplicates. Caller holds
 * opp_table_lock.
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
	     struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	struct list_head *head = &opp_table->opp_list;
	int ret;

	/*
	 * Insert new OPP in order of increasing frequency and discard if
	 * already present.
	 *
	 * Need to use &opp_table->opp_list in the condition part of the 'for'
	 * loop, don't replace it with head otherwise it will become an infinite
	 * loop.
	 */
	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
		if (new_opp->rate > opp->rate) {
			/* Track the last node with a smaller rate */
			head = &opp->node;
			continue;
		}

		if (new_opp->rate < opp->rate)
			break;

		/* Duplicate OPPs */
		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
			 __func__, opp->rate, opp->u_volt, opp->available,
			 new_opp->rate, new_opp->u_volt, new_opp->available);

		/* Identical, enabled duplicate is benign; anything else is an error */
		return opp->available && new_opp->u_volt == opp->u_volt ?
			0 : -EEXIST;
	}

	new_opp->opp_table = opp_table;
	list_add_rcu(&new_opp->node, head);

	/* Debugfs registration failure is logged but not fatal */
	ret = opp_debug_create_one(new_opp, opp_table);
	if (ret)
		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
			__func__, ret);

	/* Keep the OPP listed but unusable if the regulator can't supply it */
	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
		new_opp->available = false;
		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
			 __func__, new_opp->rate);
	}

	return 0;
}
  857. /**
  858. * _opp_add_v1() - Allocate a OPP based on v1 bindings.
  859. * @dev: device for which we do this operation
  860. * @freq: Frequency in Hz for this OPP
  861. * @u_volt: Voltage in uVolts for this OPP
  862. * @dynamic: Dynamically added OPPs.
  863. *
  864. * This function adds an opp definition to the opp table and returns status.
  865. * The opp is made available by default and it can be controlled using
  866. * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
  867. *
  868. * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
  869. * and freed by dev_pm_opp_of_remove_table.
  870. *
  871. * Locking: The internal opp_table and opp structures are RCU protected.
  872. * Hence this function internally uses RCU updater strategy with mutex locks
  873. * to keep the integrity of the internal data structures. Callers should ensure
  874. * that this function is *NOT* called under RCU protection or in contexts where
  875. * mutex cannot be locked.
  876. *
  877. * Return:
  878. * 0 On success OR
  879. * Duplicate OPPs (both freq and volt are same) and opp->available
  880. * -EEXIST Freq are same and volt are different OR
  881. * Duplicate OPPs (both freq and volt are same) and !opp->available
  882. * -ENOMEM Memory allocation failure
  883. */
  884. int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
  885. bool dynamic)
  886. {
  887. struct opp_table *opp_table;
  888. struct dev_pm_opp *new_opp;
  889. unsigned long tol;
  890. int ret;
  891. /* Hold our table modification lock here */
  892. mutex_lock(&opp_table_lock);
  893. new_opp = _allocate_opp(dev, &opp_table);
  894. if (!new_opp) {
  895. ret = -ENOMEM;
  896. goto unlock;
  897. }
  898. /* populate the opp table */
  899. new_opp->rate = freq;
  900. tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
  901. new_opp->u_volt = u_volt;
  902. new_opp->u_volt_min = u_volt - tol;
  903. new_opp->u_volt_max = u_volt + tol;
  904. new_opp->available = true;
  905. new_opp->dynamic = dynamic;
  906. ret = _opp_add(dev, new_opp, opp_table);
  907. if (ret)
  908. goto free_opp;
  909. mutex_unlock(&opp_table_lock);
  910. /*
  911. * Notify the changes in the availability of the operable
  912. * frequency/voltage list.
  913. */
  914. srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
  915. return 0;
  916. free_opp:
  917. _opp_remove(opp_table, new_opp, false);
  918. unlock:
  919. mutex_unlock(&opp_table_lock);
  920. return ret;
  921. }
  922. /**
  923. * dev_pm_opp_set_supported_hw() - Set supported platforms
  924. * @dev: Device for which supported-hw has to be set.
  925. * @versions: Array of hierarchy of versions to match.
  926. * @count: Number of elements in the array.
  927. *
  928. * This is required only for the V2 bindings, and it enables a platform to
  929. * specify the hierarchy of versions it supports. OPP layer will then enable
  930. * OPPs, which are available for those versions, based on its 'opp-supported-hw'
  931. * property.
  932. *
  933. * Locking: The internal opp_table and opp structures are RCU protected.
  934. * Hence this function internally uses RCU updater strategy with mutex locks
  935. * to keep the integrity of the internal data structures. Callers should ensure
  936. * that this function is *NOT* called under RCU protection or in contexts where
  937. * mutex cannot be locked.
  938. */
  939. int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
  940. unsigned int count)
  941. {
  942. struct opp_table *opp_table;
  943. int ret = 0;
  944. /* Hold our table modification lock here */
  945. mutex_lock(&opp_table_lock);
  946. opp_table = _add_opp_table(dev);
  947. if (!opp_table) {
  948. ret = -ENOMEM;
  949. goto unlock;
  950. }
  951. /* Make sure there are no concurrent readers while updating opp_table */
  952. WARN_ON(!list_empty(&opp_table->opp_list));
  953. /* Do we already have a version hierarchy associated with opp_table? */
  954. if (opp_table->supported_hw) {
  955. dev_err(dev, "%s: Already have supported hardware list\n",
  956. __func__);
  957. ret = -EBUSY;
  958. goto err;
  959. }
  960. opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
  961. GFP_KERNEL);
  962. if (!opp_table->supported_hw) {
  963. ret = -ENOMEM;
  964. goto err;
  965. }
  966. opp_table->supported_hw_count = count;
  967. mutex_unlock(&opp_table_lock);
  968. return 0;
  969. err:
  970. _remove_opp_table(opp_table);
  971. unlock:
  972. mutex_unlock(&opp_table_lock);
  973. return ret;
  974. }
  975. EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
  976. /**
  977. * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
  978. * @dev: Device for which supported-hw has to be put.
  979. *
  980. * This is required only for the V2 bindings, and is called for a matching
  981. * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
  982. * will not be freed.
  983. *
  984. * Locking: The internal opp_table and opp structures are RCU protected.
  985. * Hence this function internally uses RCU updater strategy with mutex locks
  986. * to keep the integrity of the internal data structures. Callers should ensure
  987. * that this function is *NOT* called under RCU protection or in contexts where
  988. * mutex cannot be locked.
  989. */
  990. void dev_pm_opp_put_supported_hw(struct device *dev)
  991. {
  992. struct opp_table *opp_table;
  993. /* Hold our table modification lock here */
  994. mutex_lock(&opp_table_lock);
  995. /* Check for existing table for 'dev' first */
  996. opp_table = _find_opp_table(dev);
  997. if (IS_ERR(opp_table)) {
  998. dev_err(dev, "Failed to find opp_table: %ld\n",
  999. PTR_ERR(opp_table));
  1000. goto unlock;
  1001. }
  1002. /* Make sure there are no concurrent readers while updating opp_table */
  1003. WARN_ON(!list_empty(&opp_table->opp_list));
  1004. if (!opp_table->supported_hw) {
  1005. dev_err(dev, "%s: Doesn't have supported hardware list\n",
  1006. __func__);
  1007. goto unlock;
  1008. }
  1009. kfree(opp_table->supported_hw);
  1010. opp_table->supported_hw = NULL;
  1011. opp_table->supported_hw_count = 0;
  1012. /* Try freeing opp_table if this was the last blocking resource */
  1013. _remove_opp_table(opp_table);
  1014. unlock:
  1015. mutex_unlock(&opp_table_lock);
  1016. }
  1017. EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
  1018. /**
  1019. * dev_pm_opp_set_prop_name() - Set prop-extn name
  1020. * @dev: Device for which the prop-name has to be set.
  1021. * @name: name to postfix to properties.
  1022. *
  1023. * This is required only for the V2 bindings, and it enables a platform to
  1024. * specify the extn to be used for certain property names. The properties to
  1025. * which the extension will apply are opp-microvolt and opp-microamp. OPP core
  1026. * should postfix the property name with -<name> while looking for them.
  1027. *
  1028. * Locking: The internal opp_table and opp structures are RCU protected.
  1029. * Hence this function internally uses RCU updater strategy with mutex locks
  1030. * to keep the integrity of the internal data structures. Callers should ensure
  1031. * that this function is *NOT* called under RCU protection or in contexts where
  1032. * mutex cannot be locked.
  1033. */
  1034. int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
  1035. {
  1036. struct opp_table *opp_table;
  1037. int ret = 0;
  1038. /* Hold our table modification lock here */
  1039. mutex_lock(&opp_table_lock);
  1040. opp_table = _add_opp_table(dev);
  1041. if (!opp_table) {
  1042. ret = -ENOMEM;
  1043. goto unlock;
  1044. }
  1045. /* Make sure there are no concurrent readers while updating opp_table */
  1046. WARN_ON(!list_empty(&opp_table->opp_list));
  1047. /* Do we already have a prop-name associated with opp_table? */
  1048. if (opp_table->prop_name) {
  1049. dev_err(dev, "%s: Already have prop-name %s\n", __func__,
  1050. opp_table->prop_name);
  1051. ret = -EBUSY;
  1052. goto err;
  1053. }
  1054. opp_table->prop_name = kstrdup(name, GFP_KERNEL);
  1055. if (!opp_table->prop_name) {
  1056. ret = -ENOMEM;
  1057. goto err;
  1058. }
  1059. mutex_unlock(&opp_table_lock);
  1060. return 0;
  1061. err:
  1062. _remove_opp_table(opp_table);
  1063. unlock:
  1064. mutex_unlock(&opp_table_lock);
  1065. return ret;
  1066. }
  1067. EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
  1068. /**
  1069. * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
  1070. * @dev: Device for which the prop-name has to be put.
  1071. *
  1072. * This is required only for the V2 bindings, and is called for a matching
  1073. * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
  1074. * will not be freed.
  1075. *
  1076. * Locking: The internal opp_table and opp structures are RCU protected.
  1077. * Hence this function internally uses RCU updater strategy with mutex locks
  1078. * to keep the integrity of the internal data structures. Callers should ensure
  1079. * that this function is *NOT* called under RCU protection or in contexts where
  1080. * mutex cannot be locked.
  1081. */
  1082. void dev_pm_opp_put_prop_name(struct device *dev)
  1083. {
  1084. struct opp_table *opp_table;
  1085. /* Hold our table modification lock here */
  1086. mutex_lock(&opp_table_lock);
  1087. /* Check for existing table for 'dev' first */
  1088. opp_table = _find_opp_table(dev);
  1089. if (IS_ERR(opp_table)) {
  1090. dev_err(dev, "Failed to find opp_table: %ld\n",
  1091. PTR_ERR(opp_table));
  1092. goto unlock;
  1093. }
  1094. /* Make sure there are no concurrent readers while updating opp_table */
  1095. WARN_ON(!list_empty(&opp_table->opp_list));
  1096. if (!opp_table->prop_name) {
  1097. dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
  1098. goto unlock;
  1099. }
  1100. kfree(opp_table->prop_name);
  1101. opp_table->prop_name = NULL;
  1102. /* Try freeing opp_table if this was the last blocking resource */
  1103. _remove_opp_table(opp_table);
  1104. unlock:
  1105. mutex_unlock(&opp_table_lock);
  1106. }
  1107. EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
  1108. /**
  1109. * dev_pm_opp_set_regulator() - Set regulator name for the device
  1110. * @dev: Device for which regulator name is being set.
  1111. * @name: Name of the regulator.
  1112. *
  1113. * In order to support OPP switching, OPP layer needs to know the name of the
  1114. * device's regulator, as the core would be required to switch voltages as well.
  1115. *
  1116. * This must be called before any OPPs are initialized for the device.
  1117. *
  1118. * Locking: The internal opp_table and opp structures are RCU protected.
  1119. * Hence this function internally uses RCU updater strategy with mutex locks
  1120. * to keep the integrity of the internal data structures. Callers should ensure
  1121. * that this function is *NOT* called under RCU protection or in contexts where
  1122. * mutex cannot be locked.
  1123. */
  1124. int dev_pm_opp_set_regulator(struct device *dev, const char *name)
  1125. {
  1126. struct opp_table *opp_table;
  1127. struct regulator *reg;
  1128. int ret;
  1129. mutex_lock(&opp_table_lock);
  1130. opp_table = _add_opp_table(dev);
  1131. if (!opp_table) {
  1132. ret = -ENOMEM;
  1133. goto unlock;
  1134. }
  1135. /* This should be called before OPPs are initialized */
  1136. if (WARN_ON(!list_empty(&opp_table->opp_list))) {
  1137. ret = -EBUSY;
  1138. goto err;
  1139. }
  1140. /* Already have a regulator set */
  1141. if (WARN_ON(!IS_ERR(opp_table->regulator))) {
  1142. ret = -EBUSY;
  1143. goto err;
  1144. }
  1145. /* Allocate the regulator */
  1146. reg = regulator_get_optional(dev, name);
  1147. if (IS_ERR(reg)) {
  1148. ret = PTR_ERR(reg);
  1149. if (ret != -EPROBE_DEFER)
  1150. dev_err(dev, "%s: no regulator (%s) found: %d\n",
  1151. __func__, name, ret);
  1152. goto err;
  1153. }
  1154. opp_table->regulator = reg;
  1155. mutex_unlock(&opp_table_lock);
  1156. return 0;
  1157. err:
  1158. _remove_opp_table(opp_table);
  1159. unlock:
  1160. mutex_unlock(&opp_table_lock);
  1161. return ret;
  1162. }
  1163. EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
  1164. /**
  1165. * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
  1166. * @dev: Device for which regulator was set.
  1167. *
  1168. * Locking: The internal opp_table and opp structures are RCU protected.
  1169. * Hence this function internally uses RCU updater strategy with mutex locks
  1170. * to keep the integrity of the internal data structures. Callers should ensure
  1171. * that this function is *NOT* called under RCU protection or in contexts where
  1172. * mutex cannot be locked.
  1173. */
  1174. void dev_pm_opp_put_regulator(struct device *dev)
  1175. {
  1176. struct opp_table *opp_table;
  1177. mutex_lock(&opp_table_lock);
  1178. /* Check for existing table for 'dev' first */
  1179. opp_table = _find_opp_table(dev);
  1180. if (IS_ERR(opp_table)) {
  1181. dev_err(dev, "Failed to find opp_table: %ld\n",
  1182. PTR_ERR(opp_table));
  1183. goto unlock;
  1184. }
  1185. if (IS_ERR(opp_table->regulator)) {
  1186. dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
  1187. goto unlock;
  1188. }
  1189. /* Make sure there are no concurrent readers while updating opp_table */
  1190. WARN_ON(!list_empty(&opp_table->opp_list));
  1191. regulator_put(opp_table->regulator);
  1192. opp_table->regulator = ERR_PTR(-ENXIO);
  1193. /* Try freeing opp_table if this was the last blocking resource */
  1194. _remove_opp_table(opp_table);
  1195. unlock:
  1196. mutex_unlock(&opp_table_lock);
  1197. }
  1198. EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
  1199. /**
  1200. * dev_pm_opp_add() - Add an OPP table from a table definitions
  1201. * @dev: device for which we do this operation
  1202. * @freq: Frequency in Hz for this OPP
  1203. * @u_volt: Voltage in uVolts for this OPP
  1204. *
  1205. * This function adds an opp definition to the opp table and returns status.
  1206. * The opp is made available by default and it can be controlled using
  1207. * dev_pm_opp_enable/disable functions.
  1208. *
  1209. * Locking: The internal opp_table and opp structures are RCU protected.
  1210. * Hence this function internally uses RCU updater strategy with mutex locks
  1211. * to keep the integrity of the internal data structures. Callers should ensure
  1212. * that this function is *NOT* called under RCU protection or in contexts where
  1213. * mutex cannot be locked.
  1214. *
  1215. * Return:
  1216. * 0 On success OR
  1217. * Duplicate OPPs (both freq and volt are same) and opp->available
  1218. * -EEXIST Freq are same and volt are different OR
  1219. * Duplicate OPPs (both freq and volt are same) and !opp->available
  1220. * -ENOMEM Memory allocation failure
  1221. */
  1222. int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
  1223. {
  1224. return _opp_add_v1(dev, freq, u_volt, true);
  1225. }
  1226. EXPORT_SYMBOL_GPL(dev_pm_opp_add);
  1227. /**
  1228. * _opp_set_availability() - helper to set the availability of an opp
  1229. * @dev: device for which we do this operation
  1230. * @freq: OPP frequency to modify availability
  1231. * @availability_req: availability status requested for this opp
  1232. *
  1233. * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
  1234. * share a common logic which is isolated here.
  1235. *
  1236. * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
  1237. * copy operation, returns 0 if no modification was done OR modification was
  1238. * successful.
  1239. *
  1240. * Locking: The internal opp_table and opp structures are RCU protected.
  1241. * Hence this function internally uses RCU updater strategy with mutex locks to
  1242. * keep the integrity of the internal data structures. Callers should ensure
  1243. * that this function is *NOT* called under RCU protection or in contexts where
  1244. * mutex locking or synchronize_rcu() blocking calls cannot be used.
  1245. */
  1246. static int _opp_set_availability(struct device *dev, unsigned long freq,
  1247. bool availability_req)
  1248. {
  1249. struct opp_table *opp_table;
  1250. struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
  1251. int r = 0;
  1252. /* keep the node allocated */
  1253. new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
  1254. if (!new_opp)
  1255. return -ENOMEM;
  1256. mutex_lock(&opp_table_lock);
  1257. /* Find the opp_table */
  1258. opp_table = _find_opp_table(dev);
  1259. if (IS_ERR(opp_table)) {
  1260. r = PTR_ERR(opp_table);
  1261. dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
  1262. goto unlock;
  1263. }
  1264. /* Do we have the frequency? */
  1265. list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
  1266. if (tmp_opp->rate == freq) {
  1267. opp = tmp_opp;
  1268. break;
  1269. }
  1270. }
  1271. if (IS_ERR(opp)) {
  1272. r = PTR_ERR(opp);
  1273. goto unlock;
  1274. }
  1275. /* Is update really needed? */
  1276. if (opp->available == availability_req)
  1277. goto unlock;
  1278. /* copy the old data over */
  1279. *new_opp = *opp;
  1280. /* plug in new node */
  1281. new_opp->available = availability_req;
  1282. list_replace_rcu(&opp->node, &new_opp->node);
  1283. mutex_unlock(&opp_table_lock);
  1284. call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
  1285. /* Notify the change of the OPP availability */
  1286. if (availability_req)
  1287. srcu_notifier_call_chain(&opp_table->srcu_head,
  1288. OPP_EVENT_ENABLE, new_opp);
  1289. else
  1290. srcu_notifier_call_chain(&opp_table->srcu_head,
  1291. OPP_EVENT_DISABLE, new_opp);
  1292. return 0;
  1293. unlock:
  1294. mutex_unlock(&opp_table_lock);
  1295. kfree(new_opp);
  1296. return r;
  1297. }
  1298. /**
  1299. * dev_pm_opp_enable() - Enable a specific OPP
  1300. * @dev: device for which we do this operation
  1301. * @freq: OPP frequency to enable
  1302. *
  1303. * Enables a provided opp. If the operation is valid, this returns 0, else the
  1304. * corresponding error value. It is meant to be used for users an OPP available
  1305. * after being temporarily made unavailable with dev_pm_opp_disable.
  1306. *
  1307. * Locking: The internal opp_table and opp structures are RCU protected.
  1308. * Hence this function indirectly uses RCU and mutex locks to keep the
  1309. * integrity of the internal data structures. Callers should ensure that
  1310. * this function is *NOT* called under RCU protection or in contexts where
  1311. * mutex locking or synchronize_rcu() blocking calls cannot be used.
  1312. *
  1313. * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
  1314. * copy operation, returns 0 if no modification was done OR modification was
  1315. * successful.
  1316. */
  1317. int dev_pm_opp_enable(struct device *dev, unsigned long freq)
  1318. {
  1319. return _opp_set_availability(dev, freq, true);
  1320. }
  1321. EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
  1322. /**
  1323. * dev_pm_opp_disable() - Disable a specific OPP
  1324. * @dev: device for which we do this operation
  1325. * @freq: OPP frequency to disable
  1326. *
  1327. * Disables a provided opp. If the operation is valid, this returns
  1328. * 0, else the corresponding error value. It is meant to be a temporary
  1329. * control by users to make this OPP not available until the circumstances are
  1330. * right to make it available again (with a call to dev_pm_opp_enable).
  1331. *
  1332. * Locking: The internal opp_table and opp structures are RCU protected.
  1333. * Hence this function indirectly uses RCU and mutex locks to keep the
  1334. * integrity of the internal data structures. Callers should ensure that
  1335. * this function is *NOT* called under RCU protection or in contexts where
  1336. * mutex locking or synchronize_rcu() blocking calls cannot be used.
  1337. *
  1338. * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
  1339. * copy operation, returns 0 if no modification was done OR modification was
  1340. * successful.
  1341. */
  1342. int dev_pm_opp_disable(struct device *dev, unsigned long freq)
  1343. {
  1344. return _opp_set_availability(dev, freq, false);
  1345. }
  1346. EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
  1347. /**
  1348. * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
  1349. * @dev: device pointer used to lookup OPP table.
  1350. *
  1351. * Return: pointer to notifier head if found, otherwise -ENODEV or
  1352. * -EINVAL based on type of error casted as pointer. value must be checked
  1353. * with IS_ERR to determine valid pointer or error result.
  1354. *
  1355. * Locking: This function must be called under rcu_read_lock(). opp_table is a
  1356. * RCU protected pointer. The reason for the same is that the opp pointer which
  1357. * is returned will remain valid for use with opp_get_{voltage, freq} only while
  1358. * under the locked area. The pointer returned must be used prior to unlocking
  1359. * with rcu_read_unlock() to maintain the integrity of the pointer.
  1360. */
  1361. struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
  1362. {
  1363. struct opp_table *opp_table = _find_opp_table(dev);
  1364. if (IS_ERR(opp_table))
  1365. return ERR_CAST(opp_table); /* matching type */
  1366. return &opp_table->srcu_head;
  1367. }
  1368. EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
  1369. /*
  1370. * Free OPPs either created using static entries present in DT or even the
  1371. * dynamically added entries based on remove_all param.
  1372. */
  1373. void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
  1374. {
  1375. struct opp_table *opp_table;
  1376. struct dev_pm_opp *opp, *tmp;
  1377. /* Hold our table modification lock here */
  1378. mutex_lock(&opp_table_lock);
  1379. /* Check for existing table for 'dev' */
  1380. opp_table = _find_opp_table(dev);
  1381. if (IS_ERR(opp_table)) {
  1382. int error = PTR_ERR(opp_table);
  1383. if (error != -ENODEV)
  1384. WARN(1, "%s: opp_table: %d\n",
  1385. IS_ERR_OR_NULL(dev) ?
  1386. "Invalid device" : dev_name(dev),
  1387. error);
  1388. goto unlock;
  1389. }
  1390. /* Find if opp_table manages a single device */
  1391. if (list_is_singular(&opp_table->dev_list)) {
  1392. /* Free static OPPs */
  1393. list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
  1394. if (remove_all || !opp->dynamic)
  1395. _opp_remove(opp_table, opp, true);
  1396. }
  1397. } else {
  1398. _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
  1399. }
  1400. unlock:
  1401. mutex_unlock(&opp_table_lock);
  1402. }
  1403. /**
  1404. * dev_pm_opp_remove_table() - Free all OPPs associated with the device
  1405. * @dev: device pointer used to lookup OPP table.
  1406. *
  1407. * Free both OPPs created using static entries present in DT and the
  1408. * dynamically added entries.
  1409. *
  1410. * Locking: The internal opp_table and opp structures are RCU protected.
  1411. * Hence this function indirectly uses RCU updater strategy with mutex locks
  1412. * to keep the integrity of the internal data structures. Callers should ensure
  1413. * that this function is *NOT* called under RCU protection or in contexts where
  1414. * mutex cannot be locked.
  1415. */
  1416. void dev_pm_opp_remove_table(struct device *dev)
  1417. {
  1418. _dev_pm_opp_remove_table(dev, true);
  1419. }
  1420. EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);