/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored in the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later free'd when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and free'd.
 */
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
        struct dev_pm_qos *qos = dev->power.qos;
        struct pm_qos_flags *pqf;
        s32 val;

        lockdep_assert_held(&dev->power.lock);

        if (IS_ERR_OR_NULL(qos))
                return PM_QOS_FLAGS_UNDEFINED;

        pqf = &qos->flags;
        if (list_empty(&pqf->list))
                return PM_QOS_FLAGS_UNDEFINED;

        val = pqf->effective_flags & mask;
        if (val)
                return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

        return PM_QOS_FLAGS_NONE;
}
/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
        unsigned long irqflags;
        enum pm_qos_flags_status ret;

        spin_lock_irqsave(&dev->power.lock, irqflags);
        ret = __dev_pm_qos_flags(dev, mask);
        spin_unlock_irqrestore(&dev->power.lock, irqflags);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
        lockdep_assert_held(&dev->power.lock);

        return dev_pm_qos_raw_read_value(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
        unsigned long flags;
        s32 ret;

        spin_lock_irqsave(&dev->power.lock, flags);
        ret = __dev_pm_qos_read_value(dev);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return ret;
}
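
/*
 * Example (illustrative sketch, not part of this file): a bus type or PM
 * domain could consult the aggregated per-device constraints with the two
 * query helpers above. The function name "example_can_power_off" and the
 * 100 us budget are hypothetical.
 *
 *      static bool example_can_power_off(struct device *dev)
 *      {
 *              // Honor a "no power off" request from any requestor.
 *              if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF)
 *                              > PM_QOS_FLAGS_NONE)
 *                      return false;
 *
 *              // Only power off if the aggregated resume-latency budget
 *              // (in microseconds) allows it.
 *              return dev_pm_qos_read_value(dev) > 100;
 *      }
 */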
/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
                            enum pm_qos_req_action action, s32 value)
{
        struct dev_pm_qos *qos = req->dev->power.qos;
        int ret;

        switch (req->type) {
        case DEV_PM_QOS_RESUME_LATENCY:
                ret = pm_qos_update_target(&qos->resume_latency,
                                           &req->data.pnode, action, value);
                break;
        case DEV_PM_QOS_LATENCY_TOLERANCE:
                ret = pm_qos_update_target(&qos->latency_tolerance,
                                           &req->data.pnode, action, value);
                if (ret) {
                        value = pm_qos_read_value(&qos->latency_tolerance);
                        req->dev->power.set_latency_tolerance(req->dev, value);
                }
                break;
        case DEV_PM_QOS_FLAGS:
                ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
                                          action, value);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request to allocate the constraint data.
 * Must be called with the dev_pm_qos_mtx mutex held.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
        struct dev_pm_qos *qos;
        struct pm_qos_constraints *c;
        struct blocking_notifier_head *n;

        qos = kzalloc(sizeof(*qos), GFP_KERNEL);
        if (!qos)
                return -ENOMEM;

        n = kzalloc(sizeof(*n), GFP_KERNEL);
        if (!n) {
                kfree(qos);
                return -ENOMEM;
        }
        BLOCKING_INIT_NOTIFIER_HEAD(n);

        c = &qos->resume_latency;
        plist_head_init(&c->list);
        c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
        c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
        c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
        c->type = PM_QOS_MIN;
        c->notifiers = n;

        c = &qos->latency_tolerance;
        plist_head_init(&c->list);
        c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
        c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
        c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
        c->type = PM_QOS_MIN;

        INIT_LIST_HEAD(&qos->flags.list);

        spin_lock_irq(&dev->power.lock);
        dev->power.qos = qos;
        spin_unlock_irq(&dev->power.lock);

        return 0;
}
static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
        struct dev_pm_qos *qos;
        struct dev_pm_qos_request *req, *tmp;
        struct pm_qos_constraints *c;
        struct pm_qos_flags *f;

        mutex_lock(&dev_pm_qos_sysfs_mtx);

        /*
         * If the device's PM QoS resume latency limit or PM QoS flags have been
         * exposed to user space, they have to be hidden at this point.
         */
        pm_qos_sysfs_remove_resume_latency(dev);
        pm_qos_sysfs_remove_flags(dev);

        mutex_lock(&dev_pm_qos_mtx);

        __dev_pm_qos_hide_latency_limit(dev);
        __dev_pm_qos_hide_flags(dev);

        qos = dev->power.qos;
        if (!qos)
                goto out;

        /* Flush the constraints lists for the device. */
        c = &qos->resume_latency;
        plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
                /*
                 * Update constraints list and call the notification
                 * callbacks if needed.
                 */
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }
        c = &qos->latency_tolerance;
        plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }
        f = &qos->flags;
        list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }

        spin_lock_irq(&dev->power.lock);
        dev->power.qos = ERR_PTR(-ENODEV);
        spin_unlock_irq(&dev->power.lock);

        kfree(qos->resume_latency.notifiers);
        kfree(qos);

 out:
        mutex_unlock(&dev_pm_qos_mtx);

        mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
static bool dev_pm_qos_invalid_req_type(struct device *dev,
                                        enum dev_pm_qos_req_type type)
{
        return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
               !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
                                    struct dev_pm_qos_request *req,
                                    enum dev_pm_qos_req_type type, s32 value)
{
        int ret = 0;

        if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
                return -EINVAL;

        if (WARN(dev_pm_qos_request_active(req),
                 "%s() called for already added request\n", __func__))
                return -EINVAL;

        if (IS_ERR(dev->power.qos))
                ret = -ENODEV;
        else if (!dev->power.qos)
                ret = dev_pm_qos_constraints_allocate(dev);

        trace_dev_pm_qos_add_request(dev_name(dev), type, value);
        if (!ret) {
                req->dev = dev;
                req->type = type;
                ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
        }

        return ret;
}
/**
 * dev_pm_qos_add_request - inserts a new QoS request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the QoS request
 *
 * This function inserts a new entry in the device constraints list of
 * requested QoS performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle. The caller needs to save this handle for later use in updates
 * and removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
                           enum dev_pm_qos_req_type type, s32 value)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = __dev_pm_qos_add_request(dev, req, type, value);
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
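
/*
 * Example (illustrative sketch, not part of this file): a driver typically
 * embeds the request handle in its private data and adds the request once,
 * e.g. at probe time. The names prefixed "example_" and the 200 us value
 * are hypothetical.
 *
 *      struct example_data {
 *              struct dev_pm_qos_request qos_req;
 *      };
 *
 *      static int example_probe_qos(struct device *dev,
 *                                   struct example_data *data)
 *      {
 *              int ret;
 *
 *              // Cap the device's acceptable resume latency at 200 us.
 *              ret = dev_pm_qos_add_request(dev, &data->qos_req,
 *                                           DEV_PM_QOS_RESUME_LATENCY, 200);
 *              // 1 and 0 both mean success; only negative values are errors.
 *              return ret < 0 ? ret : 0;
 *      }
 */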
/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
                                       s32 new_value)
{
        s32 curr_value;
        int ret = 0;

        if (!req) /* guard against callers passing in NULL */
                return -EINVAL;

        if (WARN(!dev_pm_qos_request_active(req),
                 "%s() called for unknown object\n", __func__))
                return -EINVAL;

        if (IS_ERR_OR_NULL(req->dev->power.qos))
                return -ENODEV;

        switch (req->type) {
        case DEV_PM_QOS_RESUME_LATENCY:
        case DEV_PM_QOS_LATENCY_TOLERANCE:
                curr_value = req->data.pnode.prio;
                break;
        case DEV_PM_QOS_FLAGS:
                curr_value = req->data.flr.flags;
                break;
        default:
                return -EINVAL;
        }

        trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
                                        new_value);
        if (curr_value != new_value)
                ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

        return ret;
}
/**
 * dev_pm_qos_update_request - modifies an existing QoS request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the QoS request
 *
 * Updates an existing dev PM QoS request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = __dev_pm_qos_update_request(req, new_value);
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
        int ret;

        if (!req) /* guard against callers passing in NULL */
                return -EINVAL;

        if (WARN(!dev_pm_qos_request_active(req),
                 "%s() called for unknown object\n", __func__))
                return -EINVAL;

        if (IS_ERR_OR_NULL(req->dev->power.qos))
                return -ENODEV;

        trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
                                        PM_QOS_DEFAULT_VALUE);
        ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
        memset(req, 0, sizeof(*req));
        return ret;
}
/**
 * dev_pm_qos_remove_request - removes an existing QoS request
 * @req: handle to request list element
 *
 * Will remove the PM QoS request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = __dev_pm_qos_remove_request(req);
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
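
/*
 * Example (illustrative sketch, not part of this file): continuing the
 * hypothetical example above, the driver can tighten or relax its
 * constraint at runtime and must drop it before freeing the handle.
 *
 *      // Workload became latency sensitive: tighten to 50 us.
 *      dev_pm_qos_update_request(&data->qos_req, 50);
 *
 *      // At remove time, drop the request; the core zeroes the handle
 *      // (see the memset above), so it may be reused or freed afterwards.
 *      dev_pm_qos_remove_request(&data->qos_req);
 */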
/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or an error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
        int ret = 0;

        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR(dev->power.qos))
                ret = -ENODEV;
        else if (!dev->power.qos)
                ret = dev_pm_qos_constraints_allocate(dev);

        if (!ret)
                ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
                                                       notifier);

        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
                               struct notifier_block *notifier)
{
        int retval = 0;

        mutex_lock(&dev_pm_qos_mtx);

        /* Silently return if the constraints object is not present. */
        if (!IS_ERR_OR_NULL(dev->power.qos))
                retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
                                                            notifier);

        mutex_unlock(&dev_pm_qos_mtx);
        return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
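
/*
 * Example (illustrative sketch, not part of this file): a watcher registers
 * a notifier_block to learn when the aggregated resume-latency target for a
 * device changes. The "example_" names below are hypothetical.
 *
 *      static int example_qos_notify(struct notifier_block *nb,
 *                                    unsigned long value, void *unused)
 *      {
 *              // "value" is the new aggregated resume-latency target.
 *              pr_info("resume latency target is now %lu us\n", value);
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block example_qos_nb = {
 *              .notifier_call = example_qos_notify,
 *      };
 *
 *      // Register/unregister against a given device:
 *      //      dev_pm_qos_add_notifier(dev, &example_qos_nb);
 *      //      dev_pm_qos_remove_notifier(dev, &example_qos_nb);
 */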
/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
                                    struct dev_pm_qos_request *req,
                                    enum dev_pm_qos_req_type type, s32 value)
{
        struct device *ancestor = dev->parent;
        int ret = -ENODEV;

        switch (type) {
        case DEV_PM_QOS_RESUME_LATENCY:
                while (ancestor && !ancestor->power.ignore_children)
                        ancestor = ancestor->parent;

                break;
        case DEV_PM_QOS_LATENCY_TOLERANCE:
                while (ancestor && !ancestor->power.set_latency_tolerance)
                        ancestor = ancestor->parent;

                break;
        default:
                ancestor = NULL;
        }
        if (ancestor)
                ret = dev_pm_qos_add_request(ancestor, req, type, value);

        if (ret < 0)
                req->dev = NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
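
/*
 * Example (illustrative sketch, not part of this file): a device that
 * depends on how quickly an ancestor (e.g. its bus controller) can resume
 * may constrain that ancestor instead of itself. The request handle is
 * assumed to live in driver data, as in the earlier sketches; the 100 us
 * value is hypothetical.
 *
 *      // For DEV_PM_QOS_RESUME_LATENCY this walks up from dev->parent to
 *      // the first ancestor with power.ignore_children set and adds the
 *      // request there.
 *      ret = dev_pm_qos_add_ancestor_request(dev, &data->qos_req,
 *                                            DEV_PM_QOS_RESUME_LATENCY, 100);
 *      if (ret < 0)
 *              return ret;     // no suitable ancestor, or add failed
 */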
static void __dev_pm_qos_drop_user_request(struct device *dev,
                                           enum dev_pm_qos_req_type type)
{
        struct dev_pm_qos_request *req = NULL;

        switch (type) {
        case DEV_PM_QOS_RESUME_LATENCY:
                req = dev->power.qos->resume_latency_req;
                dev->power.qos->resume_latency_req = NULL;
                break;
        case DEV_PM_QOS_LATENCY_TOLERANCE:
                req = dev->power.qos->latency_tolerance_req;
                dev->power.qos->latency_tolerance_req = NULL;
                break;
        case DEV_PM_QOS_FLAGS:
                req = dev->power.qos->flags_req;
                dev->power.qos->flags_req = NULL;
                break;
        }
        __dev_pm_qos_remove_request(req);
        kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
                                         enum dev_pm_qos_req_type type)
{
        mutex_lock(&dev_pm_qos_mtx);
        __dev_pm_qos_drop_user_request(dev, type);
        mutex_unlock(&dev_pm_qos_mtx);
}
/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
        struct dev_pm_qos_request *req;
        int ret;

        if (!device_is_registered(dev) || value < 0)
                return -EINVAL;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
        if (ret < 0) {
                kfree(req);
                return ret;
        }

        mutex_lock(&dev_pm_qos_sysfs_mtx);

        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR_OR_NULL(dev->power.qos))
                ret = -ENODEV;
        else if (dev->power.qos->resume_latency_req)
                ret = -EEXIST;

        if (ret < 0) {
                __dev_pm_qos_remove_request(req);
                kfree(req);
                mutex_unlock(&dev_pm_qos_mtx);
                goto out;
        }
        dev->power.qos->resume_latency_req = req;

        mutex_unlock(&dev_pm_qos_mtx);

        ret = pm_qos_sysfs_add_resume_latency(dev);
        if (ret)
                dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
        mutex_unlock(&dev_pm_qos_sysfs_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
        if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
        mutex_lock(&dev_pm_qos_sysfs_mtx);

        pm_qos_sysfs_remove_resume_latency(dev);

        mutex_lock(&dev_pm_qos_mtx);
        __dev_pm_qos_hide_latency_limit(dev);
        mutex_unlock(&dev_pm_qos_mtx);

        mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
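
/*
 * Example (illustrative sketch, not part of this file): a bus type or driver
 * can create the per-device resume-latency sysfs attribute for a registered
 * device, giving user space its own resume-latency request, and tear it
 * down again on unbind. The 1000 us initial value is hypothetical.
 *
 *      // Expose with a 1000 us initial limit; fails with -EEXIST if the
 *      // attribute is already exposed for this device.
 *      ret = dev_pm_qos_expose_latency_limit(dev, 1000);
 *
 *      // ... later, e.g. on unbind:
 *      dev_pm_qos_hide_latency_limit(dev);
 */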
/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
        struct dev_pm_qos_request *req;
        int ret;

        if (!device_is_registered(dev))
                return -EINVAL;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
        if (ret < 0) {
                kfree(req);
                return ret;
        }

        pm_runtime_get_sync(dev);
        mutex_lock(&dev_pm_qos_sysfs_mtx);

        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR_OR_NULL(dev->power.qos))
                ret = -ENODEV;
        else if (dev->power.qos->flags_req)
                ret = -EEXIST;

        if (ret < 0) {
                __dev_pm_qos_remove_request(req);
                kfree(req);
                mutex_unlock(&dev_pm_qos_mtx);
                goto out;
        }
        dev->power.qos->flags_req = req;

        mutex_unlock(&dev_pm_qos_mtx);

        ret = pm_qos_sysfs_add_flags(dev);
        if (ret)
                dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
        mutex_unlock(&dev_pm_qos_sysfs_mtx);
        pm_runtime_put(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
static void __dev_pm_qos_hide_flags(struct device *dev)
{
        if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
        pm_runtime_get_sync(dev);
        mutex_lock(&dev_pm_qos_sysfs_mtx);

        pm_qos_sysfs_remove_flags(dev);

        mutex_lock(&dev_pm_qos_mtx);
        __dev_pm_qos_hide_flags(dev);
        mutex_unlock(&dev_pm_qos_mtx);

        mutex_unlock(&dev_pm_qos_sysfs_mtx);
        pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
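
/*
 * Example (illustrative sketch, not part of this file): platform code can
 * expose the PM QoS flags of a device and later flip individual flags in
 * the user-space-owned request on its behalf.
 *
 *      // Expose the flags attribute(s), initially requesting that power
 *      // must not be removed from the device.
 *      ret = dev_pm_qos_expose_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
 *
 *      // Later, clear that flag again in the same request:
 *      ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, false);
 */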
/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
        s32 value;
        int ret;

        pm_runtime_get_sync(dev);
        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
                ret = -EINVAL;
                goto out;
        }

        value = dev_pm_qos_requested_flags(dev);
        if (set)
                value |= mask;
        else
                value &= ~mask;

        ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
        mutex_unlock(&dev_pm_qos_mtx);
        pm_runtime_put(dev);
        return ret;
}
/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
        s32 ret;

        mutex_lock(&dev_pm_qos_mtx);
        ret = IS_ERR_OR_NULL(dev->power.qos)
                || !dev->power.qos->latency_tolerance_req ?
                        PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
                        dev->power.qos->latency_tolerance_req->data.pnode.prio;
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
        int ret;

        mutex_lock(&dev_pm_qos_mtx);

        if (IS_ERR_OR_NULL(dev->power.qos)
            || !dev->power.qos->latency_tolerance_req) {
                struct dev_pm_qos_request *req;

                if (val < 0) {
                        if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
                                ret = 0;
                        else
                                ret = -EINVAL;
                        goto out;
                }
                req = kzalloc(sizeof(*req), GFP_KERNEL);
                if (!req) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
                if (ret < 0) {
                        kfree(req);
                        goto out;
                }
                dev->power.qos->latency_tolerance_req = req;
        } else {
                if (val < 0) {
                        __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
                        ret = 0;
                } else {
                        ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
                }
        }

 out:
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
        int ret;

        if (!dev->power.set_latency_tolerance)
                return -EINVAL;

        mutex_lock(&dev_pm_qos_sysfs_mtx);
        ret = pm_qos_sysfs_add_latency_tolerance(dev);
        mutex_unlock(&dev_pm_qos_sysfs_mtx);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
        mutex_lock(&dev_pm_qos_sysfs_mtx);
        pm_qos_sysfs_remove_latency_tolerance(dev);
        mutex_unlock(&dev_pm_qos_sysfs_mtx);

        /* Remove the request from user space now. */
        pm_runtime_get_sync(dev);
        dev_pm_qos_update_user_latency_tolerance(dev,
                PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
        pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
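
/*
 * Example (illustrative sketch, not part of this file): latency tolerance
 * requests are only valid for devices whose subsystem or driver provides a
 * set_latency_tolerance() callback, which the PM QoS core invokes with the
 * new aggregated value (see apply_constraint() above). The callback below
 * is hypothetical.
 *
 *      static void example_set_latency_tolerance(struct device *dev, s32 val)
 *      {
 *              // val < 0 means "no tolerance constraint"; otherwise program
 *              // the hardware with the tolerance in microseconds.
 *      }
 *
 *      // Before exposing the sysfs attribute, e.g. at probe:
 *      //      dev->power.set_latency_tolerance = example_set_latency_tolerance;
 *      //      ret = dev_pm_qos_expose_latency_tolerance(dev);
 */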