opp.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603
  1. /*
  2. * Generic OPP Interface
  3. *
  4. * Copyright (C) 2009-2010 Texas Instruments Incorporated.
  5. * Nishanth Menon
  6. * Romit Dasgupta
  7. * Kevin Hilman
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #include <linux/cpu.h>
  14. #include <linux/kernel.h>
  15. #include <linux/errno.h>
  16. #include <linux/err.h>
  17. #include <linux/slab.h>
  18. #include <linux/device.h>
  19. #include <linux/list.h>
  20. #include <linux/rculist.h>
  21. #include <linux/rcupdate.h>
  22. #include <linux/pm_opp.h>
  23. #include <linux/of.h>
  24. #include <linux/export.h>
  25. /*
  26. * Internal data structure organization with the OPP layer library is as
  27. * follows:
  28. * dev_opp_list (root)
  29. * |- device 1 (represents voltage domain 1)
  30. * | |- opp 1 (availability, freq, voltage)
  31. * | |- opp 2 ..
  32. * ... ...
  33. * | `- opp n ..
  34. * |- device 2 (represents the next voltage domain)
  35. * ...
  36. * `- device m (represents mth voltage domain)
  37. * device 1, 2.. are represented by dev_opp structure while each opp
  38. * is represented by the opp structure.
  39. */
  40. /**
  41. * struct dev_pm_opp - Generic OPP description structure
  42. * @node: opp list node. The nodes are maintained throughout the lifetime
  43. * of boot. It is expected only an optimal set of OPPs are
  44. * added to the library by the SoC framework.
  45. * RCU usage: opp list is traversed with RCU locks. node
  46. * modification is possible realtime, hence the modifications
  47. * are protected by the dev_opp_list_lock for integrity.
  48. * IMPORTANT: the opp nodes should be maintained in increasing
  49. * order.
  50. * @dynamic: not-created from static DT entries.
  51. * @available: true/false - marks if this OPP as available or not
  52. * @turbo: true if turbo (boost) OPP
  53. * @rate: Frequency in hertz
  54. * @u_volt: Target voltage in microvolts corresponding to this OPP
  55. * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
  56. * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
  57. * @u_amp: Maximum current drawn by the device in microamperes
  58. * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
  59. * frequency from any other OPP's frequency.
  60. * @dev_opp: points back to the device_opp struct this opp belongs to
  61. * @rcu_head: RCU callback head used for deferred freeing
  62. * @np: OPP's device node.
  63. *
  64. * This structure stores the OPP information for a given device.
  65. */
struct dev_pm_opp {
	struct list_head node;		/* entry in device_opp::opp_list (kept in increasing rate order) */
	bool available;			/* may this OPP currently be selected? */
	bool dynamic;			/* not created from static DT entries */
	bool turbo;			/* turbo (boost) OPP, not for normal use */
	unsigned long rate;		/* frequency in Hz */
	unsigned long u_volt;		/* target voltage in microvolts */
	unsigned long u_volt_min;	/* minimum voltage in microvolts */
	unsigned long u_volt_max;	/* maximum voltage in microvolts */
	unsigned long u_amp;		/* maximum current drawn in microamperes */
	unsigned long clock_latency_ns;	/* latency of switching to this OPP from any other */
	struct device_opp *dev_opp;	/* table this OPP belongs to */
	struct rcu_head rcu_head;	/* for deferred freeing (kfree_rcu via call_srcu) */
	struct device_node *np;		/* OPP's DT node */
};
  81. /**
  82. * struct device_list_opp - devices managed by 'struct device_opp'
  83. * @node: list node
  84. * @dev: device to which the struct object belongs
  85. * @rcu_head: RCU callback head used for deferred freeing
  86. *
  87. * This is an internal data structure maintaining the list of devices that are
  88. * managed by 'struct device_opp'.
  89. */
struct device_list_opp {
	struct list_head node;		/* entry in device_opp::dev_list */
	const struct device *dev;	/* device this entry represents */
	struct rcu_head rcu_head;	/* for deferred freeing (kfree_rcu via call_srcu) */
};
  95. /**
  96. * struct device_opp - Device opp structure
  97. * @node: list node - contains the devices with OPPs that
  98. * have been registered. Nodes once added are not modified in this
  99. * list.
  100. * RCU usage: nodes are not modified in the list of device_opp,
  101. * however addition is possible and is secured by dev_opp_list_lock
  102. * @srcu_head: notifier head to notify the OPP availability changes.
  103. * @rcu_head: RCU callback head used for deferred freeing
  104. * @dev_list: list of devices that share these OPPs
  105. * @opp_list: list of opps
  106. * @np: struct device_node pointer for opp's DT node.
* @clock_latency_ns_max: Max clock latency in nanoseconds over all OPPs.
* @shared_opp: OPP is shared between multiple devices.
* @suspend_opp: Pointer to the OPP to be used during device suspend.
  108. *
  109. * This is an internal data structure maintaining the link to opps attached to
  110. * a device. This structure is not meant to be shared to users as it is
  111. * meant for book keeping and private to OPP library.
  112. *
  113. * Because the opp structures can be used from both rcu and srcu readers, we
  114. * need to wait for the grace period of both of them before freeing any
  115. * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
  116. */
struct device_opp {
	struct list_head node;		/* entry in the global dev_opp_list */
	struct srcu_notifier_head srcu_head; /* notifies OPP availability changes */
	struct rcu_head rcu_head;	/* for deferred freeing */
	struct list_head dev_list;	/* devices sharing these OPPs (device_list_opp) */
	struct list_head opp_list;	/* OPPs, kept sorted by increasing rate */
	struct device_node *np;		/* DT node of the OPP table */
	unsigned long clock_latency_ns_max; /* max clock_latency_ns over all OPPs */
	bool shared_opp;		/* OPP table shared between multiple devices */
	struct dev_pm_opp *suspend_opp;	/* OPP to use during device suspend, may be NULL */
};
  128. /*
  129. * The root of the list of all devices. All device_opp structures branch off
  130. * from here, with each device_opp containing the list of opp it supports in
  131. * various states of availability.
  132. */
static LIST_HEAD(dev_opp_list);

/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);

/*
 * Assert (via RCU lockdep) that the caller is either inside an RCU read-side
 * critical section or holds dev_opp_list_lock; used by the reader-side
 * helpers below.
 */
#define opp_rcu_lockdep_assert()					\
do {									\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&dev_opp_list_lock),		\
			 "Missing rcu_read_lock() or "			\
			 "dev_opp_list_lock protection");		\
} while (0)
  143. static struct device_list_opp *_find_list_dev(const struct device *dev,
  144. struct device_opp *dev_opp)
  145. {
  146. struct device_list_opp *list_dev;
  147. list_for_each_entry(list_dev, &dev_opp->dev_list, node)
  148. if (list_dev->dev == dev)
  149. return list_dev;
  150. return NULL;
  151. }
  152. static struct device_opp *_managed_opp(const struct device_node *np)
  153. {
  154. struct device_opp *dev_opp;
  155. list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
  156. if (dev_opp->np == np) {
  157. /*
  158. * Multiple devices can point to the same OPP table and
  159. * so will have same node-pointer, np.
  160. *
  161. * But the OPPs will be considered as shared only if the
  162. * OPP table contains a "opp-shared" property.
  163. */
  164. return dev_opp->shared_opp ? dev_opp : NULL;
  165. }
  166. }
  167. return NULL;
  168. }
  169. /**
  170. * _find_device_opp() - find device_opp struct using device pointer
  171. * @dev: device pointer used to lookup device OPPs
  172. *
  173. * Search list of device OPPs for one containing matching device. Does a RCU
  174. * reader operation to grab the pointer needed.
  175. *
  176. * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
  177. * -EINVAL based on type of error.
  178. *
  179. * Locking: This function must be called under rcu_read_lock(). device_opp
  180. * is a RCU protected pointer. This means that device_opp is valid as long
  181. * as we are under RCU lock.
  182. */
  183. static struct device_opp *_find_device_opp(struct device *dev)
  184. {
  185. struct device_opp *dev_opp;
  186. if (IS_ERR_OR_NULL(dev)) {
  187. pr_err("%s: Invalid parameters\n", __func__);
  188. return ERR_PTR(-EINVAL);
  189. }
  190. list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
  191. if (_find_list_dev(dev, dev_opp))
  192. return dev_opp;
  193. return ERR_PTR(-ENODEV);
  194. }
  195. /**
  196. * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
  197. * @opp: opp for which voltage has to be returned for
  198. *
  199. * Return: voltage in micro volt corresponding to the opp, else
  200. * return 0
  201. *
  202. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  203. * protected pointer. This means that opp which could have been fetched by
  204. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  205. * under RCU lock. The pointer returned by the opp_find_freq family must be
  206. * used in the same section as the usage of this function with the pointer
  207. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  208. * pointer.
  209. */
  210. unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
  211. {
  212. struct dev_pm_opp *tmp_opp;
  213. unsigned long v = 0;
  214. opp_rcu_lockdep_assert();
  215. tmp_opp = rcu_dereference(opp);
  216. if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
  217. pr_err("%s: Invalid parameters\n", __func__);
  218. else
  219. v = tmp_opp->u_volt;
  220. return v;
  221. }
  222. EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
  223. /**
  224. * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  225. * @opp: opp for which frequency has to be returned for
  226. *
  227. * Return: frequency in hertz corresponding to the opp, else
  228. * return 0
  229. *
  230. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  231. * protected pointer. This means that opp which could have been fetched by
  232. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  233. * under RCU lock. The pointer returned by the opp_find_freq family must be
  234. * used in the same section as the usage of this function with the pointer
  235. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  236. * pointer.
  237. */
  238. unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
  239. {
  240. struct dev_pm_opp *tmp_opp;
  241. unsigned long f = 0;
  242. opp_rcu_lockdep_assert();
  243. tmp_opp = rcu_dereference(opp);
  244. if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
  245. pr_err("%s: Invalid parameters\n", __func__);
  246. else
  247. f = tmp_opp->rate;
  248. return f;
  249. }
  250. EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
  251. /**
  252. * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
  253. * @opp: opp for which turbo mode is being verified
  254. *
  255. * Turbo OPPs are not for normal use, and can be enabled (under certain
  256. * conditions) for short duration of times to finish high throughput work
  257. * quickly. Running on them for longer times may overheat the chip.
  258. *
  259. * Return: true if opp is turbo opp, else false.
  260. *
  261. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  262. * protected pointer. This means that opp which could have been fetched by
  263. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  264. * under RCU lock. The pointer returned by the opp_find_freq family must be
  265. * used in the same section as the usage of this function with the pointer
  266. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  267. * pointer.
  268. */
  269. bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
  270. {
  271. struct dev_pm_opp *tmp_opp;
  272. opp_rcu_lockdep_assert();
  273. tmp_opp = rcu_dereference(opp);
  274. if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
  275. pr_err("%s: Invalid parameters\n", __func__);
  276. return false;
  277. }
  278. return tmp_opp->turbo;
  279. }
  280. EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
  281. /**
  282. * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
  283. * @dev: device for which we do this operation
  284. *
  285. * Return: This function returns the max clock latency in nanoseconds.
  286. *
  287. * Locking: This function takes rcu_read_lock().
  288. */
  289. unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
  290. {
  291. struct device_opp *dev_opp;
  292. unsigned long clock_latency_ns;
  293. rcu_read_lock();
  294. dev_opp = _find_device_opp(dev);
  295. if (IS_ERR(dev_opp))
  296. clock_latency_ns = 0;
  297. else
  298. clock_latency_ns = dev_opp->clock_latency_ns_max;
  299. rcu_read_unlock();
  300. return clock_latency_ns;
  301. }
  302. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
  303. /**
  304. * dev_pm_opp_get_suspend_opp() - Get suspend opp
  305. * @dev: device for which we do this operation
  306. *
  307. * Return: This function returns pointer to the suspend opp if it is
  308. * defined and available, otherwise it returns NULL.
  309. *
  310. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  311. * protected pointer. The reason for the same is that the opp pointer which is
  312. * returned will remain valid for use with opp_get_{voltage, freq} only while
  313. * under the locked area. The pointer returned must be used prior to unlocking
  314. * with rcu_read_unlock() to maintain the integrity of the pointer.
  315. */
  316. struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
  317. {
  318. struct device_opp *dev_opp;
  319. opp_rcu_lockdep_assert();
  320. dev_opp = _find_device_opp(dev);
  321. if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
  322. !dev_opp->suspend_opp->available)
  323. return NULL;
  324. return dev_opp->suspend_opp;
  325. }
  326. EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
  327. /**
  328. * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
  329. * @dev: device for which we do this operation
  330. *
  331. * Return: This function returns the number of available opps if there are any,
  332. * else returns 0 if none or the corresponding error value.
  333. *
  334. * Locking: This function takes rcu_read_lock().
  335. */
  336. int dev_pm_opp_get_opp_count(struct device *dev)
  337. {
  338. struct device_opp *dev_opp;
  339. struct dev_pm_opp *temp_opp;
  340. int count = 0;
  341. rcu_read_lock();
  342. dev_opp = _find_device_opp(dev);
  343. if (IS_ERR(dev_opp)) {
  344. count = PTR_ERR(dev_opp);
  345. dev_err(dev, "%s: device OPP not found (%d)\n",
  346. __func__, count);
  347. goto out_unlock;
  348. }
  349. list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
  350. if (temp_opp->available)
  351. count++;
  352. }
  353. out_unlock:
  354. rcu_read_unlock();
  355. return count;
  356. }
  357. EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
  358. /**
  359. * dev_pm_opp_find_freq_exact() - search for an exact frequency
  360. * @dev: device for which we do this operation
  361. * @freq: frequency to search for
  362. * @available: true/false - match for available opp
  363. *
  364. * Return: Searches for exact match in the opp list and returns pointer to the
  365. * matching opp if found, else returns ERR_PTR in case of error and should
  366. * be handled using IS_ERR. Error return values can be:
  367. * EINVAL: for bad pointer
  368. * ERANGE: no match found for search
  369. * ENODEV: if device not found in list of registered devices
  370. *
  371. * Note: available is a modifier for the search. if available=true, then the
  372. * match is for exact matching frequency and is available in the stored OPP
  373. * table. if false, the match is for exact frequency which is not available.
  374. *
  375. * This provides a mechanism to enable an opp which is not available currently
  376. * or the opposite as well.
  377. *
  378. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  379. * protected pointer. The reason for the same is that the opp pointer which is
  380. * returned will remain valid for use with opp_get_{voltage, freq} only while
  381. * under the locked area. The pointer returned must be used prior to unlocking
  382. * with rcu_read_unlock() to maintain the integrity of the pointer.
  383. */
  384. struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
  385. unsigned long freq,
  386. bool available)
  387. {
  388. struct device_opp *dev_opp;
  389. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  390. opp_rcu_lockdep_assert();
  391. dev_opp = _find_device_opp(dev);
  392. if (IS_ERR(dev_opp)) {
  393. int r = PTR_ERR(dev_opp);
  394. dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
  395. return ERR_PTR(r);
  396. }
  397. list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
  398. if (temp_opp->available == available &&
  399. temp_opp->rate == freq) {
  400. opp = temp_opp;
  401. break;
  402. }
  403. }
  404. return opp;
  405. }
  406. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
  407. /**
  408. * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
  409. * @dev: device for which we do this operation
  410. * @freq: Start frequency
  411. *
  412. * Search for the matching ceil *available* OPP from a starting freq
  413. * for a device.
  414. *
  415. * Return: matching *opp and refreshes *freq accordingly, else returns
  416. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  417. * values can be:
  418. * EINVAL: for bad pointer
  419. * ERANGE: no match found for search
  420. * ENODEV: if device not found in list of registered devices
  421. *
  422. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  423. * protected pointer. The reason for the same is that the opp pointer which is
  424. * returned will remain valid for use with opp_get_{voltage, freq} only while
  425. * under the locked area. The pointer returned must be used prior to unlocking
  426. * with rcu_read_unlock() to maintain the integrity of the pointer.
  427. */
  428. struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
  429. unsigned long *freq)
  430. {
  431. struct device_opp *dev_opp;
  432. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  433. opp_rcu_lockdep_assert();
  434. if (!dev || !freq) {
  435. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  436. return ERR_PTR(-EINVAL);
  437. }
  438. dev_opp = _find_device_opp(dev);
  439. if (IS_ERR(dev_opp))
  440. return ERR_CAST(dev_opp);
  441. list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
  442. if (temp_opp->available && temp_opp->rate >= *freq) {
  443. opp = temp_opp;
  444. *freq = opp->rate;
  445. break;
  446. }
  447. }
  448. return opp;
  449. }
  450. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
  451. /**
  452. * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
  453. * @dev: device for which we do this operation
  454. * @freq: Start frequency
  455. *
  456. * Search for the matching floor *available* OPP from a starting freq
  457. * for a device.
  458. *
  459. * Return: matching *opp and refreshes *freq accordingly, else returns
  460. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  461. * values can be:
  462. * EINVAL: for bad pointer
  463. * ERANGE: no match found for search
  464. * ENODEV: if device not found in list of registered devices
  465. *
  466. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  467. * protected pointer. The reason for the same is that the opp pointer which is
  468. * returned will remain valid for use with opp_get_{voltage, freq} only while
  469. * under the locked area. The pointer returned must be used prior to unlocking
  470. * with rcu_read_unlock() to maintain the integrity of the pointer.
  471. */
  472. struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
  473. unsigned long *freq)
  474. {
  475. struct device_opp *dev_opp;
  476. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  477. opp_rcu_lockdep_assert();
  478. if (!dev || !freq) {
  479. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  480. return ERR_PTR(-EINVAL);
  481. }
  482. dev_opp = _find_device_opp(dev);
  483. if (IS_ERR(dev_opp))
  484. return ERR_CAST(dev_opp);
  485. list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
  486. if (temp_opp->available) {
  487. /* go to the next node, before choosing prev */
  488. if (temp_opp->rate > *freq)
  489. break;
  490. else
  491. opp = temp_opp;
  492. }
  493. }
  494. if (!IS_ERR(opp))
  495. *freq = opp->rate;
  496. return opp;
  497. }
  498. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
  499. /* List-dev Helpers */
  500. static void _kfree_list_dev_rcu(struct rcu_head *head)
  501. {
  502. struct device_list_opp *list_dev;
  503. list_dev = container_of(head, struct device_list_opp, rcu_head);
  504. kfree_rcu(list_dev, rcu_head);
  505. }
/*
 * Unlink @list_dev from @dev_opp's device list and free it once both the
 * SRCU and RCU grace periods have elapsed (the SRCU handler chains into
 * kfree_rcu(), see _kfree_list_dev_rcu()).
 */
static void _remove_list_dev(struct device_list_opp *list_dev,
			     struct device_opp *dev_opp)
{
	list_del(&list_dev->node);
	call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
		  _kfree_list_dev_rcu);
}
  513. static struct device_list_opp *_add_list_dev(const struct device *dev,
  514. struct device_opp *dev_opp)
  515. {
  516. struct device_list_opp *list_dev;
  517. list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
  518. if (!list_dev)
  519. return NULL;
  520. /* Initialize list-dev */
  521. list_dev->dev = dev;
  522. list_add_rcu(&list_dev->node, &dev_opp->dev_list);
  523. return list_dev;
  524. }
  525. /**
  526. * _add_device_opp() - Find device OPP table or allocate a new one
  527. * @dev: device for which we do this operation
  528. *
  529. * It tries to find an existing table first, if it couldn't find one, it
  530. * allocates a new OPP table and returns that.
  531. *
  532. * Return: valid device_opp pointer if success, else NULL.
  533. */
  534. static struct device_opp *_add_device_opp(struct device *dev)
  535. {
  536. struct device_opp *dev_opp;
  537. struct device_list_opp *list_dev;
  538. /* Check for existing list for 'dev' first */
  539. dev_opp = _find_device_opp(dev);
  540. if (!IS_ERR(dev_opp))
  541. return dev_opp;
  542. /*
  543. * Allocate a new device OPP table. In the infrequent case where a new
  544. * device is needed to be added, we pay this penalty.
  545. */
  546. dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
  547. if (!dev_opp)
  548. return NULL;
  549. INIT_LIST_HEAD(&dev_opp->dev_list);
  550. list_dev = _add_list_dev(dev, dev_opp);
  551. if (!list_dev) {
  552. kfree(dev_opp);
  553. return NULL;
  554. }
  555. srcu_init_notifier_head(&dev_opp->srcu_head);
  556. INIT_LIST_HEAD(&dev_opp->opp_list);
  557. /* Secure the device list modification */
  558. list_add_rcu(&dev_opp->node, &dev_opp_list);
  559. return dev_opp;
  560. }
  561. /**
  562. * _kfree_device_rcu() - Free device_opp RCU handler
  563. * @head: RCU head
  564. */
  565. static void _kfree_device_rcu(struct rcu_head *head)
  566. {
  567. struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
  568. kfree_rcu(device_opp, rcu_head);
  569. }
  570. /**
  571. * _remove_device_opp() - Removes a device OPP table
  572. * @dev_opp: device OPP table to be removed.
  573. *
* Removes/frees device OPP table if it doesn't contain any OPPs.
  575. */
static void _remove_device_opp(struct device_opp *dev_opp)
{
	struct device_list_opp *list_dev;

	/* Keep the table around while it still holds OPPs */
	if (!list_empty(&dev_opp->opp_list))
		return;

	/* Drop the remaining device entry from dev_list */
	list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
				    node);

	_remove_list_dev(list_dev, dev_opp);

	/* dev_list must be empty now */
	WARN_ON(!list_empty(&dev_opp->dev_list));

	list_del_rcu(&dev_opp->node);
	/* Free only after both SRCU and RCU grace periods (see _kfree_device_rcu) */
	call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
		  _kfree_device_rcu);
}
  590. /**
  591. * _kfree_opp_rcu() - Free OPP RCU handler
  592. * @head: RCU head
  593. */
  594. static void _kfree_opp_rcu(struct rcu_head *head)
  595. {
  596. struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
  597. kfree_rcu(opp, rcu_head);
  598. }
  599. /**
  600. * _opp_remove() - Remove an OPP from a table definition
  601. * @dev_opp: points back to the device_opp struct this opp belongs to
  602. * @opp: pointer to the OPP to remove
  603. * @notify: OPP_EVENT_REMOVE notification should be sent or not
  604. *
  605. * This function removes an opp definition from the opp list.
  606. *
  607. * Locking: The internal device_opp and opp structures are RCU protected.
  608. * It is assumed that the caller holds required mutex for an RCU updater
  609. * strategy.
  610. */
static void _opp_remove(struct device_opp *dev_opp,
			struct dev_pm_opp *opp, bool notify)
{
	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	if (notify)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
	list_del_rcu(&opp->node);
	/* Free the OPP only after both SRCU and RCU grace periods */
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Drop the table itself if this was its last OPP */
	_remove_device_opp(dev_opp);
}
  624. /**
  625. * dev_pm_opp_remove() - Remove an OPP from OPP list
  626. * @dev: device for which we do this operation
  627. * @freq: OPP to remove with matching 'freq'
  628. *
  629. * This function removes an opp from the opp list.
  630. *
  631. * Locking: The internal device_opp and opp structures are RCU protected.
  632. * Hence this function internally uses RCU updater strategy with mutex locks
  633. * to keep the integrity of the internal data structures. Callers should ensure
  634. * that this function is *NOT* called under RCU protection or in contexts where
  635. * mutex cannot be locked.
  636. */
  637. void dev_pm_opp_remove(struct device *dev, unsigned long freq)
  638. {
  639. struct dev_pm_opp *opp;
  640. struct device_opp *dev_opp;
  641. bool found = false;
  642. /* Hold our list modification lock here */
  643. mutex_lock(&dev_opp_list_lock);
  644. dev_opp = _find_device_opp(dev);
  645. if (IS_ERR(dev_opp))
  646. goto unlock;
  647. list_for_each_entry(opp, &dev_opp->opp_list, node) {
  648. if (opp->rate == freq) {
  649. found = true;
  650. break;
  651. }
  652. }
  653. if (!found) {
  654. dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
  655. __func__, freq);
  656. goto unlock;
  657. }
  658. _opp_remove(dev_opp, opp, true);
  659. unlock:
  660. mutex_unlock(&dev_opp_list_lock);
  661. }
  662. EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
  663. static struct dev_pm_opp *_allocate_opp(struct device *dev,
  664. struct device_opp **dev_opp)
  665. {
  666. struct dev_pm_opp *opp;
  667. /* allocate new OPP node */
  668. opp = kzalloc(sizeof(*opp), GFP_KERNEL);
  669. if (!opp)
  670. return NULL;
  671. INIT_LIST_HEAD(&opp->node);
  672. *dev_opp = _add_device_opp(dev);
  673. if (!*dev_opp) {
  674. kfree(opp);
  675. return NULL;
  676. }
  677. return opp;
  678. }
  679. static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
  680. struct device_opp *dev_opp)
  681. {
  682. struct dev_pm_opp *opp;
  683. struct list_head *head = &dev_opp->opp_list;
  684. /*
  685. * Insert new OPP in order of increasing frequency and discard if
  686. * already present.
  687. *
  688. * Need to use &dev_opp->opp_list in the condition part of the 'for'
  689. * loop, don't replace it with head otherwise it will become an infinite
  690. * loop.
  691. */
  692. list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
  693. if (new_opp->rate > opp->rate) {
  694. head = &opp->node;
  695. continue;
  696. }
  697. if (new_opp->rate < opp->rate)
  698. break;
  699. /* Duplicate OPPs */
  700. dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
  701. __func__, opp->rate, opp->u_volt, opp->available,
  702. new_opp->rate, new_opp->u_volt, new_opp->available);
  703. return opp->available && new_opp->u_volt == opp->u_volt ?
  704. 0 : -EEXIST;
  705. }
  706. new_opp->dev_opp = dev_opp;
  707. list_add_rcu(&new_opp->node, head);
  708. return 0;
  709. }
  710. /**
  711. * _opp_add_dynamic() - Allocate a dynamic OPP.
  712. * @dev: device for which we do this operation
  713. * @freq: Frequency in Hz for this OPP
  714. * @u_volt: Voltage in uVolts for this OPP
  715. * @dynamic: Dynamically added OPPs.
  716. *
  717. * This function adds an opp definition to the opp list and returns status.
  718. * The opp is made available by default and it can be controlled using
  719. * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
  720. *
  721. * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
  722. * freed by of_free_opp_table.
  723. *
  724. * Locking: The internal device_opp and opp structures are RCU protected.
  725. * Hence this function internally uses RCU updater strategy with mutex locks
  726. * to keep the integrity of the internal data structures. Callers should ensure
  727. * that this function is *NOT* called under RCU protection or in contexts where
  728. * mutex cannot be locked.
  729. *
  730. * Return:
  731. * 0 On success OR
  732. * Duplicate OPPs (both freq and volt are same) and opp->available
  733. * -EEXIST Freq are same and volt are different OR
  734. * Duplicate OPPs (both freq and volt are same) and !opp->available
  735. * -ENOMEM Memory allocation failure
  736. */
  737. static int _opp_add_dynamic(struct device *dev, unsigned long freq,
  738. long u_volt, bool dynamic)
  739. {
  740. struct device_opp *dev_opp;
  741. struct dev_pm_opp *new_opp;
  742. int ret;
  743. /* Hold our list modification lock here */
  744. mutex_lock(&dev_opp_list_lock);
  745. new_opp = _allocate_opp(dev, &dev_opp);
  746. if (!new_opp) {
  747. ret = -ENOMEM;
  748. goto unlock;
  749. }
  750. /* populate the opp table */
  751. new_opp->rate = freq;
  752. new_opp->u_volt = u_volt;
  753. new_opp->available = true;
  754. new_opp->dynamic = dynamic;
  755. ret = _opp_add(dev, new_opp, dev_opp);
  756. if (ret)
  757. goto free_opp;
  758. mutex_unlock(&dev_opp_list_lock);
  759. /*
  760. * Notify the changes in the availability of the operable
  761. * frequency/voltage list.
  762. */
  763. srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
  764. return 0;
  765. free_opp:
  766. _opp_remove(dev_opp, new_opp, false);
  767. unlock:
  768. mutex_unlock(&dev_opp_list_lock);
  769. return ret;
  770. }
  771. /* TODO: Support multiple regulators */
  772. static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
  773. {
  774. u32 microvolt[3] = {0};
  775. int count, ret;
  776. /* Missing property isn't a problem, but an invalid entry is */
  777. if (!of_find_property(opp->np, "opp-microvolt", NULL))
  778. return 0;
  779. count = of_property_count_u32_elems(opp->np, "opp-microvolt");
  780. if (count < 0) {
  781. dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
  782. __func__, count);
  783. return count;
  784. }
  785. /* There can be one or three elements here */
  786. if (count != 1 && count != 3) {
  787. dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
  788. __func__, count);
  789. return -EINVAL;
  790. }
  791. ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
  792. count);
  793. if (ret) {
  794. dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
  795. ret);
  796. return -EINVAL;
  797. }
  798. opp->u_volt = microvolt[0];
  799. opp->u_volt_min = microvolt[1];
  800. opp->u_volt_max = microvolt[2];
  801. return 0;
  802. }
/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev: device for which we do this operation
 * @np: device node
 *
 * This function adds an opp definition to the opp list and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -EINVAL	Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp;
	u64 rate;
	u32 val;
	int ret;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	new_opp = _allocate_opp(dev, &dev_opp);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* "opp-hz" is the only mandatory property of an OPP node */
	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in clk API, and so casting
	 * explicitly to its type. Must be fixed once rate is 64 bit
	 * guaranteed in clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");
	/* Keep the DT node so later property lookups can use it */
	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	/* Optional properties: silently skipped when absent */
	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_get_microvolt(new_opp, dev);
	if (ret)
		goto free_opp;

	if (!of_property_read_u32(new_opp->np, "opp-microamp", &val))
		new_opp->u_amp = val;

	ret = _opp_add(dev, new_opp, dev_opp);
	if (ret)
		goto free_opp;

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (dev_opp->suspend_opp)
			/* Only the first suspend OPP wins; warn about the rest */
			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
				 __func__, dev_opp->suspend_opp->rate,
				 new_opp->rate);
		else
			dev_opp->suspend_opp = new_opp;
	}

	/* Track the worst-case transition latency across all OPPs */
	if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
		dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&dev_opp_list_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
		 new_opp->u_volt_min, new_opp->u_volt_max,
		 new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	/* 'false': don't notify listeners about an OPP that never existed */
	_opp_remove(dev_opp, new_opp, false);
unlock:
	mutex_unlock(&dev_opp_list_lock);
	return ret;
}
  893. /**
  894. * dev_pm_opp_add() - Add an OPP table from a table definitions
  895. * @dev: device for which we do this operation
  896. * @freq: Frequency in Hz for this OPP
  897. * @u_volt: Voltage in uVolts for this OPP
  898. *
  899. * This function adds an opp definition to the opp list and returns status.
  900. * The opp is made available by default and it can be controlled using
  901. * dev_pm_opp_enable/disable functions.
  902. *
  903. * Locking: The internal device_opp and opp structures are RCU protected.
  904. * Hence this function internally uses RCU updater strategy with mutex locks
  905. * to keep the integrity of the internal data structures. Callers should ensure
  906. * that this function is *NOT* called under RCU protection or in contexts where
  907. * mutex cannot be locked.
  908. *
  909. * Return:
  910. * 0 On success OR
  911. * Duplicate OPPs (both freq and volt are same) and opp->available
  912. * -EEXIST Freq are same and volt are different OR
  913. * Duplicate OPPs (both freq and volt are same) and !opp->available
  914. * -ENOMEM Memory allocation failure
  915. */
  916. int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
  917. {
  918. return _opp_add_dynamic(dev, freq, u_volt, true);
  919. }
  920. EXPORT_SYMBOL_GPL(dev_pm_opp_add);
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/*
	 * keep the node allocated
	 *
	 * RCU update pattern: readers may hold references to the old node, so
	 * a modified copy is published and the old node freed after a grace
	 * period. Allocate before taking the lock.
	 */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	/* 'opp' still holds ERR_PTR(-ENODEV) if no rate matched */
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;

	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	/* Atomically swap the nodes as seen by RCU readers */
	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	/* Old node freed only after all current readers are done with it */
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
					 new_opp);

	return 0;

unlock:
	mutex_unlock(&dev_opp_list_lock);
	/* Unused pre-allocated copy */
	kfree(new_opp);
	return r;
}
  992. /**
  993. * dev_pm_opp_enable() - Enable a specific OPP
  994. * @dev: device for which we do this operation
  995. * @freq: OPP frequency to enable
  996. *
  997. * Enables a provided opp. If the operation is valid, this returns 0, else the
  998. * corresponding error value. It is meant to be used for users an OPP available
  999. * after being temporarily made unavailable with dev_pm_opp_disable.
  1000. *
  1001. * Locking: The internal device_opp and opp structures are RCU protected.
  1002. * Hence this function indirectly uses RCU and mutex locks to keep the
  1003. * integrity of the internal data structures. Callers should ensure that
  1004. * this function is *NOT* called under RCU protection or in contexts where
  1005. * mutex locking or synchronize_rcu() blocking calls cannot be used.
  1006. *
  1007. * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
  1008. * copy operation, returns 0 if no modification was done OR modification was
  1009. * successful.
  1010. */
  1011. int dev_pm_opp_enable(struct device *dev, unsigned long freq)
  1012. {
  1013. return _opp_set_availability(dev, freq, true);
  1014. }
  1015. EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
  1016. /**
  1017. * dev_pm_opp_disable() - Disable a specific OPP
  1018. * @dev: device for which we do this operation
  1019. * @freq: OPP frequency to disable
  1020. *
  1021. * Disables a provided opp. If the operation is valid, this returns
  1022. * 0, else the corresponding error value. It is meant to be a temporary
  1023. * control by users to make this OPP not available until the circumstances are
  1024. * right to make it available again (with a call to dev_pm_opp_enable).
  1025. *
  1026. * Locking: The internal device_opp and opp structures are RCU protected.
  1027. * Hence this function indirectly uses RCU and mutex locks to keep the
  1028. * integrity of the internal data structures. Callers should ensure that
  1029. * this function is *NOT* called under RCU protection or in contexts where
  1030. * mutex locking or synchronize_rcu() blocking calls cannot be used.
  1031. *
  1032. * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
  1033. * copy operation, returns 0 if no modification was done OR modification was
  1034. * successful.
  1035. */
  1036. int dev_pm_opp_disable(struct device *dev, unsigned long freq)
  1037. {
  1038. return _opp_set_availability(dev, freq, false);
  1039. }
  1040. EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
  1041. /**
  1042. * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
  1043. * @dev: device pointer used to lookup device OPPs.
  1044. *
  1045. * Return: pointer to notifier head if found, otherwise -ENODEV or
  1046. * -EINVAL based on type of error casted as pointer. value must be checked
  1047. * with IS_ERR to determine valid pointer or error result.
  1048. *
  1049. * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
  1050. * protected pointer. The reason for the same is that the opp pointer which is
  1051. * returned will remain valid for use with opp_get_{voltage, freq} only while
  1052. * under the locked area. The pointer returned must be used prior to unlocking
  1053. * with rcu_read_unlock() to maintain the integrity of the pointer.
  1054. */
  1055. struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
  1056. {
  1057. struct device_opp *dev_opp = _find_device_opp(dev);
  1058. if (IS_ERR(dev_opp))
  1059. return ERR_CAST(dev_opp); /* matching type */
  1060. return &dev_opp->srcu_head;
  1061. }
  1062. EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
  1063. #ifdef CONFIG_OF
  1064. /**
  1065. * of_free_opp_table() - Free OPP table entries created from static DT entries
  1066. * @dev: device pointer used to lookup device OPPs.
  1067. *
  1068. * Free OPPs created using static entries present in DT.
  1069. *
  1070. * Locking: The internal device_opp and opp structures are RCU protected.
  1071. * Hence this function indirectly uses RCU updater strategy with mutex locks
  1072. * to keep the integrity of the internal data structures. Callers should ensure
  1073. * that this function is *NOT* called under RCU protection or in contexts where
  1074. * mutex cannot be locked.
  1075. */
  1076. void of_free_opp_table(struct device *dev)
  1077. {
  1078. struct device_opp *dev_opp;
  1079. struct dev_pm_opp *opp, *tmp;
  1080. /* Hold our list modification lock here */
  1081. mutex_lock(&dev_opp_list_lock);
  1082. /* Check for existing list for 'dev' */
  1083. dev_opp = _find_device_opp(dev);
  1084. if (IS_ERR(dev_opp)) {
  1085. int error = PTR_ERR(dev_opp);
  1086. if (error != -ENODEV)
  1087. WARN(1, "%s: dev_opp: %d\n",
  1088. IS_ERR_OR_NULL(dev) ?
  1089. "Invalid device" : dev_name(dev),
  1090. error);
  1091. goto unlock;
  1092. }
  1093. /* Find if dev_opp manages a single device */
  1094. if (list_is_singular(&dev_opp->dev_list)) {
  1095. /* Free static OPPs */
  1096. list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
  1097. if (!opp->dynamic)
  1098. _opp_remove(dev_opp, opp, true);
  1099. }
  1100. } else {
  1101. _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
  1102. }
  1103. unlock:
  1104. mutex_unlock(&dev_opp_list_lock);
  1105. }
  1106. EXPORT_SYMBOL_GPL(of_free_opp_table);
  1107. void of_cpumask_free_opp_table(cpumask_var_t cpumask)
  1108. {
  1109. struct device *cpu_dev;
  1110. int cpu;
  1111. WARN_ON(cpumask_empty(cpumask));
  1112. for_each_cpu(cpu, cpumask) {
  1113. cpu_dev = get_cpu_device(cpu);
  1114. if (!cpu_dev) {
  1115. pr_err("%s: failed to get cpu%d device\n", __func__,
  1116. cpu);
  1117. continue;
  1118. }
  1119. of_free_opp_table(cpu_dev);
  1120. }
  1121. }
  1122. EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table);
  1123. /* Returns opp descriptor node from its phandle. Caller must do of_node_put() */
  1124. static struct device_node *
  1125. _of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop)
  1126. {
  1127. struct device_node *opp_np;
  1128. opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value));
  1129. if (!opp_np) {
  1130. dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n",
  1131. __func__, prop->name);
  1132. return ERR_PTR(-EINVAL);
  1133. }
  1134. return opp_np;
  1135. }
  1136. /* Returns opp descriptor node for a device. Caller must do of_node_put() */
  1137. static struct device_node *_of_get_opp_desc_node(struct device *dev)
  1138. {
  1139. const struct property *prop;
  1140. prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
  1141. if (!prop)
  1142. return ERR_PTR(-ENODEV);
  1143. if (!prop->value)
  1144. return ERR_PTR(-ENODATA);
  1145. /*
  1146. * TODO: Support for multiple OPP tables.
  1147. *
  1148. * There should be only ONE phandle present in "operating-points-v2"
  1149. * property.
  1150. */
  1151. if (prop->length != sizeof(__be32)) {
  1152. dev_err(dev, "%s: Invalid opp desc phandle\n", __func__);
  1153. return ERR_PTR(-EINVAL);
  1154. }
  1155. return _of_get_opp_desc_node_from_prop(dev, prop);
  1156. }
/* Initializes OPP tables based on new bindings */
static int _of_init_opp_table_v2(struct device *dev,
				 const struct property *prop)
{
	struct device_node *opp_np, *np;
	struct device_opp *dev_opp;
	int ret = 0, count = 0;

	if (!prop->value)
		return -ENODATA;

	/* Get opp node; holds a reference released via put_opp_np below */
	opp_np = _of_get_opp_desc_node_from_prop(dev, prop);
	if (IS_ERR(opp_np))
		return PTR_ERR(opp_np);

	/* Shared table already populated by another device? */
	dev_opp = _managed_opp(opp_np);
	if (dev_opp) {
		/* OPPs are already managed */
		if (!_add_list_dev(dev, dev_opp))
			ret = -ENOMEM;
		goto put_opp_np;
	}

	/* We have opp-list node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_np, np) {
		count++;

		ret = _opp_add_static_v2(dev, np);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			/* Tear down any OPPs already added for this device */
			goto free_table;
		}
	}

	/* There should be one of more OPP defined */
	if (WARN_ON(!count)) {
		/* Empty table: nothing was added, so nothing to free */
		ret = -ENOENT;
		goto put_opp_np;
	}

	dev_opp = _find_device_opp(dev);
	if (WARN_ON(IS_ERR(dev_opp))) {
		ret = PTR_ERR(dev_opp);
		goto free_table;
	}

	/* Remember the descriptor node and whether the table is CPU-shared */
	dev_opp->np = opp_np;
	dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");

	of_node_put(opp_np);
	return 0;

free_table:
	of_free_opp_table(dev);
put_opp_np:
	of_node_put(opp_np);

	return ret;
}
  1207. /* Initializes OPP tables based on old-deprecated bindings */
  1208. static int _of_init_opp_table_v1(struct device *dev)
  1209. {
  1210. const struct property *prop;
  1211. const __be32 *val;
  1212. int nr;
  1213. prop = of_find_property(dev->of_node, "operating-points", NULL);
  1214. if (!prop)
  1215. return -ENODEV;
  1216. if (!prop->value)
  1217. return -ENODATA;
  1218. /*
  1219. * Each OPP is a set of tuples consisting of frequency and
  1220. * voltage like <freq-kHz vol-uV>.
  1221. */
  1222. nr = prop->length / sizeof(u32);
  1223. if (nr % 2) {
  1224. dev_err(dev, "%s: Invalid OPP list\n", __func__);
  1225. return -EINVAL;
  1226. }
  1227. val = prop->value;
  1228. while (nr) {
  1229. unsigned long freq = be32_to_cpup(val++) * 1000;
  1230. unsigned long volt = be32_to_cpup(val++);
  1231. if (_opp_add_dynamic(dev, freq, volt, false))
  1232. dev_warn(dev, "%s: Failed to add OPP %ld\n",
  1233. __func__, freq);
  1234. nr -= 2;
  1235. }
  1236. return 0;
  1237. }
  1238. /**
  1239. * of_init_opp_table() - Initialize opp table from device tree
  1240. * @dev: device pointer used to lookup device OPPs.
  1241. *
  1242. * Register the initial OPP table with the OPP library for given device.
  1243. *
  1244. * Locking: The internal device_opp and opp structures are RCU protected.
  1245. * Hence this function indirectly uses RCU updater strategy with mutex locks
  1246. * to keep the integrity of the internal data structures. Callers should ensure
  1247. * that this function is *NOT* called under RCU protection or in contexts where
  1248. * mutex cannot be locked.
  1249. *
  1250. * Return:
  1251. * 0 On success OR
  1252. * Duplicate OPPs (both freq and volt are same) and opp->available
  1253. * -EEXIST Freq are same and volt are different OR
  1254. * Duplicate OPPs (both freq and volt are same) and !opp->available
  1255. * -ENOMEM Memory allocation failure
  1256. * -ENODEV when 'operating-points' property is not found or is invalid data
  1257. * in device node.
  1258. * -ENODATA when empty 'operating-points' property is found
  1259. * -EINVAL when invalid entries are found in opp-v2 table
  1260. */
  1261. int of_init_opp_table(struct device *dev)
  1262. {
  1263. const struct property *prop;
  1264. /*
  1265. * OPPs have two version of bindings now. The older one is deprecated,
  1266. * try for the new binding first.
  1267. */
  1268. prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
  1269. if (!prop) {
  1270. /*
  1271. * Try old-deprecated bindings for backward compatibility with
  1272. * older dtbs.
  1273. */
  1274. return _of_init_opp_table_v1(dev);
  1275. }
  1276. return _of_init_opp_table_v2(dev, prop);
  1277. }
  1278. EXPORT_SYMBOL_GPL(of_init_opp_table);
  1279. int of_cpumask_init_opp_table(cpumask_var_t cpumask)
  1280. {
  1281. struct device *cpu_dev;
  1282. int cpu, ret = 0;
  1283. WARN_ON(cpumask_empty(cpumask));
  1284. for_each_cpu(cpu, cpumask) {
  1285. cpu_dev = get_cpu_device(cpu);
  1286. if (!cpu_dev) {
  1287. pr_err("%s: failed to get cpu%d device\n", __func__,
  1288. cpu);
  1289. continue;
  1290. }
  1291. ret = of_init_opp_table(cpu_dev);
  1292. if (ret) {
  1293. pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
  1294. __func__, cpu, ret);
  1295. /* Free all other OPPs */
  1296. of_cpumask_free_opp_table(cpumask);
  1297. break;
  1298. }
  1299. }
  1300. return ret;
  1301. }
  1302. EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table);
  1303. /* Required only for V1 bindings, as v2 can manage it from DT itself */
  1304. int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
  1305. {
  1306. struct device_list_opp *list_dev;
  1307. struct device_opp *dev_opp;
  1308. struct device *dev;
  1309. int cpu, ret = 0;
  1310. rcu_read_lock();
  1311. dev_opp = _find_device_opp(cpu_dev);
  1312. if (IS_ERR(dev_opp)) {
  1313. ret = -EINVAL;
  1314. goto out_rcu_read_unlock;
  1315. }
  1316. for_each_cpu(cpu, cpumask) {
  1317. if (cpu == cpu_dev->id)
  1318. continue;
  1319. dev = get_cpu_device(cpu);
  1320. if (!dev) {
  1321. dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
  1322. __func__, cpu);
  1323. continue;
  1324. }
  1325. list_dev = _add_list_dev(dev, dev_opp);
  1326. if (!list_dev) {
  1327. dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
  1328. __func__, cpu);
  1329. continue;
  1330. }
  1331. }
  1332. out_rcu_read_unlock:
  1333. rcu_read_unlock();
  1334. return 0;
  1335. }
  1336. EXPORT_SYMBOL_GPL(set_cpus_sharing_opps);
  1337. /*
  1338. * Works only for OPP v2 bindings.
  1339. *
  1340. * cpumask should be already set to mask of cpu_dev->id.
  1341. * Returns -ENOENT if operating-points-v2 bindings aren't supported.
  1342. */
  1343. int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
  1344. {
  1345. struct device_node *np, *tmp_np;
  1346. struct device *tcpu_dev;
  1347. int cpu, ret = 0;
  1348. /* Get OPP descriptor node */
  1349. np = _of_get_opp_desc_node(cpu_dev);
  1350. if (IS_ERR(np)) {
  1351. dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__,
  1352. PTR_ERR(np));
  1353. return -ENOENT;
  1354. }
  1355. /* OPPs are shared ? */
  1356. if (!of_property_read_bool(np, "opp-shared"))
  1357. goto put_cpu_node;
  1358. for_each_possible_cpu(cpu) {
  1359. if (cpu == cpu_dev->id)
  1360. continue;
  1361. tcpu_dev = get_cpu_device(cpu);
  1362. if (!tcpu_dev) {
  1363. dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
  1364. __func__, cpu);
  1365. ret = -ENODEV;
  1366. goto put_cpu_node;
  1367. }
  1368. /* Get OPP descriptor node */
  1369. tmp_np = _of_get_opp_desc_node(tcpu_dev);
  1370. if (IS_ERR(tmp_np)) {
  1371. dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n",
  1372. __func__, PTR_ERR(tmp_np));
  1373. ret = PTR_ERR(tmp_np);
  1374. goto put_cpu_node;
  1375. }
  1376. /* CPUs are sharing opp node */
  1377. if (np == tmp_np)
  1378. cpumask_set_cpu(cpu, cpumask);
  1379. of_node_put(tmp_np);
  1380. }
  1381. put_cpu_node:
  1382. of_node_put(np);
  1383. return ret;
  1384. }
  1385. EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps);
  1386. #endif