/* pm_qos.h */
#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H
/* interface for the pm_qos_power infrastructure of the linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
#include <linux/plist.h>	/* priority-sorted lists used for constraint sets */
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/workqueue.h>	/* delayed_work for timed requests */
/*
 * System-wide PM QoS class IDs, passed as @pm_qos_class to
 * pm_qos_add_request() and friends.  PM_QOS_RESERVED (0) is never a
 * valid class; PM_QOS_NUM_CLASSES is the array-size sentinel.
 */
enum {
	PM_QOS_RESERVED = 0,
	PM_QOS_CPU_DMA_LATENCY,
	PM_QOS_NETWORK_LATENCY,
	PM_QOS_NETWORK_THROUGHPUT,
	PM_QOS_MEMORY_BANDWIDTH,

	/* insert new class ID */
	PM_QOS_NUM_CLASSES,
};
/*
 * Result of a PM QoS flags query (dev_pm_qos_flags()): whether the
 * queried flag mask is set by none, some, or all active requests.
 * UNDEFINED is returned when the question cannot be answered
 * (e.g. the !CONFIG_PM stubs below).
 */
enum pm_qos_flags_status {
	PM_QOS_FLAGS_UNDEFINED = -1,
	PM_QOS_FLAGS_NONE,
	PM_QOS_FLAGS_SOME,
	PM_QOS_FLAGS_ALL,
};
/* Default constraint values for each class (used when no request is active). */
#define PM_QOS_DEFAULT_VALUE -1
#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE	0
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE	0
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE	0
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT	(-1)
/* largest positive s32: "any latency is acceptable" */
#define PM_QOS_LATENCY_ANY			((s32)(~(__u32)0 >> 1))

/* Bit values for the DEV_PM_QOS_FLAGS request type. */
#define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)
#define PM_QOS_FLAG_REMOTE_WAKEUP	(1 << 1)
/* A single system-wide PM QoS request against one PM_QOS_* class. */
struct pm_qos_request {
	struct plist_node node;		/* entry in the class's constraint list */
	int pm_qos_class;		/* one of the PM_QOS_* class IDs above */
	struct delayed_work work;	/* for pm_qos_update_request_timeout */
};
/* A single PM QoS flags request (a set of PM_QOS_FLAG_* bits). */
struct pm_qos_flags_request {
	struct list_head node;
	s32 flags;	/* Do not change to 64 bit */
};
/* Kinds of per-device PM QoS requests; starts at 1 so 0 means "unset". */
enum dev_pm_qos_req_type {
	DEV_PM_QOS_RESUME_LATENCY = 1,
	DEV_PM_QOS_LATENCY_TOLERANCE,
	DEV_PM_QOS_FLAGS,
};
/*
 * A per-device PM QoS request.  Which union member is valid depends on
 * @type: pnode for the latency request types, flr for DEV_PM_QOS_FLAGS.
 */
struct dev_pm_qos_request {
	enum dev_pm_qos_req_type type;
	union {
		struct plist_node pnode;
		struct pm_qos_flags_request flr;
	} data;
	struct device *dev;	/* non-NULL while the request is active */
};
/*
 * Aggregation policy for a constraint set: how the target value is
 * derived from all active requests.  (PM_QOS_UNITIALIZED is a
 * long-standing misspelling kept for source compatibility.)
 */
enum pm_qos_type {
	PM_QOS_UNITIALIZED,
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN,		/* return the smallest value */
	PM_QOS_SUM		/* return the sum */
};
/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * or effective_flags atomically. Atomic access is only guaranteed on all CPU
 * types linux supports for 32 bit quantities
 */
struct pm_qos_constraints {
	struct plist_head list;		/* active requests, priority-sorted */
	s32 target_value;	/* Do not change to 64 bit */
	s32 default_value;	/* target when the list is empty */
	s32 no_constraint_value;
	enum pm_qos_type type;	/* aggregation policy (max/min/sum) */
	struct blocking_notifier_head *notifiers;	/* optional change callbacks */
};
/* Aggregated PM QoS flags: the OR-style summary of all listed requests. */
struct pm_qos_flags {
	struct list_head list;		/* active pm_qos_flags_request entries */
	s32 effective_flags;	/* Do not change to 64 bit */
};
/*
 * Per-device PM QoS state, hung off dev->power.qos.  The *_req pointers
 * hold the requests created by the sysfs "expose" interfaces below.
 */
struct dev_pm_qos {
	struct pm_qos_constraints resume_latency;
	struct pm_qos_constraints latency_tolerance;
	struct pm_qos_flags flags;
	struct dev_pm_qos_request *resume_latency_req;
	struct dev_pm_qos_request *latency_tolerance_req;
	struct dev_pm_qos_request *flags_req;
};
/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
	PM_QOS_ADD_REQ,		/* Add a new request */
	PM_QOS_UPDATE_REQ,	/* Update an existing request */
	PM_QOS_REMOVE_REQ	/* Remove an existing request */
};
  96. static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
  97. {
  98. return req->dev != NULL;
  99. }
/* Core plumbing: apply @action for one request and recompute the aggregate. */
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
			 enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
			 struct pm_qos_flags_request *req,
			 enum pm_qos_req_action action, s32 val);

/* System-wide request API, keyed by PM_QOS_* class. */
void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
			s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
			   s32 new_value);
void pm_qos_update_request_timeout(struct pm_qos_request *req,
				   s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);

/* Query / notification API for the system-wide classes. */
int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);
#ifdef CONFIG_PM
/*
 * Per-device PM QoS API.  The double-underscore variants presumably skip
 * locking done by their plain counterparts -- verify against the
 * implementation before calling them.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
			    struct notifier_block *notifier);
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier);
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
/* Like dev_pm_qos_add_request(), but targets an ancestor of @dev. */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value);
/* Expose/hide user-space (sysfs) control of the per-device constraints. */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
int dev_pm_qos_expose_flags(struct device *dev, s32 value);
void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
int dev_pm_qos_expose_latency_tolerance(struct device *dev);
void dev_pm_qos_hide_latency_tolerance(struct device *dev);
  146. static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
  147. {
  148. return dev->power.qos->resume_latency_req->data.pnode.prio;
  149. }
  150. static inline s32 dev_pm_qos_requested_flags(struct device *dev)
  151. {
  152. return dev->power.qos->flags_req->data.flr.flags;
  153. }
#else
/*
 * !CONFIG_PM stubs: keep callers compiling without the PM QoS core.
 * Mutating calls report success (0); queries return "no data" values
 * (0, or PM_QOS_FLAGS_UNDEFINED / _NO_CONSTRAINT where those exist).
 */
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
							  s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
							s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
					 struct dev_pm_qos_request *req,
					 enum dev_pm_qos_req_type type,
					 s32 value)
			{ return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
					    s32 new_value)
			{ return 0; }
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
			{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
					  struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
					     struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_add_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
/* Even without PM QoS, keep dev->power.power_state consistent. */
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
	dev->power.power_state = PMSG_ON;
}
static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
	dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
						  struct dev_pm_qos_request *req,
						  enum dev_pm_qos_req_type type,
						  s32 value)
			{ return 0; }
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_flags(struct device *dev) {}
static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
			{ return 0; }
static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
			{ return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
			{ return 0; }
static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
#endif

#endif