#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H
/* interface for the pm_qos_power infrastructure of the linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/workqueue.h>

enum {
        PM_QOS_RESERVED = 0,
        PM_QOS_CPU_DMA_LATENCY,
        PM_QOS_NETWORK_LATENCY,
        PM_QOS_NETWORK_THROUGHPUT,
        PM_QOS_MEMORY_BANDWIDTH,

        /* insert new class ID */
        PM_QOS_NUM_CLASSES,
};

enum pm_qos_flags_status {
        PM_QOS_FLAGS_UNDEFINED = -1,
        PM_QOS_FLAGS_NONE,
        PM_QOS_FLAGS_SOME,
        PM_QOS_FLAGS_ALL,
};
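
/*
 * Aggregate answer reported by dev_pm_qos_flags() and __dev_pm_qos_flags()
 * for a given flags mask (descriptive note, not from the original header):
 * UNDEFINED when the device has no PM QoS object attached (or CONFIG_PM is
 * disabled), NONE when no flag in the mask is requested, SOME when only part
 * of the mask is requested, and ALL when every flag in the mask is requested.
 */
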
#define PM_QOS_DEFAULT_VALUE (-1)
#define PM_QOS_LATENCY_ANY S32_MAX
#define PM_QOS_LATENCY_ANY_NS ((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC)

#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)

#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)

struct pm_qos_request {
        struct plist_node node;
        int pm_qos_class;
        struct delayed_work work; /* for pm_qos_update_request_timeout */
};

struct pm_qos_flags_request {
        struct list_head node;
        s32 flags;      /* Do not change to 64 bit */
};

enum dev_pm_qos_req_type {
        DEV_PM_QOS_RESUME_LATENCY = 1,
        DEV_PM_QOS_LATENCY_TOLERANCE,
        DEV_PM_QOS_FLAGS,
};

struct dev_pm_qos_request {
        enum dev_pm_qos_req_type type;
        union {
                struct plist_node pnode;
                struct pm_qos_flags_request flr;
        } data;
        struct device *dev;
};

enum pm_qos_type {
        PM_QOS_UNITIALIZED,
        PM_QOS_MAX,             /* return the largest value */
        PM_QOS_MIN,             /* return the smallest value */
        PM_QOS_SUM              /* return the sum */
};
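
/*
 * How the requests queued against one constraints object are combined into
 * its target_value (descriptive note, not from the original header):
 * PM_QOS_MIN keeps the smallest request (used by the latency classes and by
 * device resume latency), PM_QOS_MAX keeps the largest (e.g. network
 * throughput), and PM_QOS_SUM adds all requests together (e.g. memory
 * bandwidth).
 */
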
/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * or effective_flags atomically.  Atomic access is only guaranteed on all CPU
 * types Linux supports for 32 bit quantities.
 */
struct pm_qos_constraints {
        struct plist_head list;
        s32 target_value;       /* Do not change to 64 bit */
        s32 default_value;
        s32 no_constraint_value;
        enum pm_qos_type type;
        struct blocking_notifier_head *notifiers;
};

struct pm_qos_flags {
        struct list_head list;
        s32 effective_flags;    /* Do not change to 64 bit */
};

struct dev_pm_qos {
        struct pm_qos_constraints resume_latency;
        struct pm_qos_constraints latency_tolerance;
        struct pm_qos_flags flags;
        struct dev_pm_qos_request *resume_latency_req;
        struct dev_pm_qos_request *latency_tolerance_req;
        struct dev_pm_qos_request *flags_req;
};

/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
        PM_QOS_ADD_REQ,         /* Add a new request */
        PM_QOS_UPDATE_REQ,      /* Update an existing request */
        PM_QOS_REMOVE_REQ       /* Remove an existing request */
};

static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
{
        return req->dev != NULL;
}
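
/*
 * A device request is "active" from the time it is added until it is
 * removed (req->dev is set on add and cleared on remove).  Callers commonly
 * guard updates with this check; an illustrative sketch only:
 *
 *      if (dev_pm_qos_request_active(&req))
 *              dev_pm_qos_update_request(&req, new_value);
 */
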
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
                         enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
                         struct pm_qos_flags_request *req,
                         enum pm_qos_req_action action, s32 val);
void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
                        s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
                           s32 new_value);
void pm_qos_update_request_timeout(struct pm_qos_request *req,
                                   s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);

int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);
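
/*
 * Typical use of the system-wide (class) API above -- an illustrative
 * sketch only; the variable name and the latency values are made up, and the
 * request object must stay allocated for as long as the constraint is held.
 * Values for the latency classes are in microseconds:
 *
 *      static struct pm_qos_request my_req;
 *
 *      pm_qos_add_request(&my_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *      ...
 *      pm_qos_update_request(&my_req, 100);    // relax the bound
 *      ...
 *      pm_qos_remove_request(&my_req);         // drop the constraint
 */
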
#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
                           enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
                            struct notifier_block *notifier);
int dev_pm_qos_remove_notifier(struct device *dev,
                               struct notifier_block *notifier);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
                                    struct dev_pm_qos_request *req,
                                    enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
int dev_pm_qos_expose_flags(struct device *dev, s32 value);
void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
int dev_pm_qos_expose_latency_tolerance(struct device *dev);
void dev_pm_qos_hide_latency_tolerance(struct device *dev);
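
/*
 * Typical use of the per-device API above -- an illustrative sketch only;
 * the variable name and the latency values (in microseconds) are made up:
 *
 *      static struct dev_pm_qos_request dev_req;
 *
 *      dev_pm_qos_add_request(dev, &dev_req, DEV_PM_QOS_RESUME_LATENCY, 100);
 *      ...
 *      dev_pm_qos_update_request(&dev_req, 50);
 *      ...
 *      dev_pm_qos_remove_request(&dev_req);
 *
 * Flags constraints use the same request type with DEV_PM_QOS_FLAGS, or the
 * helper dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true).
 */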

static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
{
        return dev->power.qos->resume_latency_req->data.pnode.prio;
}

static inline s32 dev_pm_qos_requested_flags(struct device *dev)
{
        return dev->power.qos->flags_req->data.flr.flags;
}

static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
{
        return IS_ERR_OR_NULL(dev->power.qos) ?
                PM_QOS_RESUME_LATENCY_NO_CONSTRAINT :
                pm_qos_read_value(&dev->power.qos->resume_latency);
}
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
                                                          s32 mask)
                        { return PM_QOS_FLAGS_UNDEFINED; }
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
                                                        s32 mask)
                        { return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_read_value(struct device *dev)
                        { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
                        { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; }
static inline int dev_pm_qos_add_request(struct device *dev,
                                         struct dev_pm_qos_request *req,
                                         enum dev_pm_qos_req_type type,
                                         s32 value)
                        { return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
                                            s32 new_value)
                        { return 0; }
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
                        { return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
                                          struct notifier_block *notifier)
                        { return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
                                             struct notifier_block *notifier)
                        { return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
        dev->power.power_state = PMSG_ON;
}
static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
        dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
                                                  struct dev_pm_qos_request *req,
                                                  enum dev_pm_qos_req_type type,
                                                  s32 value)
                        { return 0; }
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
                        { return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
                        { return 0; }
static inline void dev_pm_qos_hide_flags(struct device *dev) {}
static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
                        { return 0; }
static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
                        { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
                        { return 0; }
static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
                        { return 0; }
static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}

static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
{
        return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
}
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
{
        return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
}
#endif

#endif