/* include/linux/pm_qos.h */
#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H
/* interface for the pm_qos_power infrastructure of the linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/workqueue.h>
  11. enum {
  12. PM_QOS_RESERVED = 0,
  13. PM_QOS_CPU_DMA_LATENCY,
  14. PM_QOS_NETWORK_LATENCY,
  15. PM_QOS_NETWORK_THROUGHPUT,
  16. PM_QOS_MEMORY_BANDWIDTH,
  17. /* insert new class ID */
  18. PM_QOS_NUM_CLASSES,
  19. };
  20. enum pm_qos_flags_status {
  21. PM_QOS_FLAGS_UNDEFINED = -1,
  22. PM_QOS_FLAGS_NONE,
  23. PM_QOS_FLAGS_SOME,
  24. PM_QOS_FLAGS_ALL,
  25. };
  26. #define PM_QOS_DEFAULT_VALUE -1
  27. #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
  28. #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
  29. #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
  30. #define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
  31. #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
  32. #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
  33. #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
  34. #define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1))
  35. #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
  36. struct pm_qos_request {
  37. struct plist_node node;
  38. int pm_qos_class;
  39. struct delayed_work work; /* for pm_qos_update_request_timeout */
  40. };
  41. struct pm_qos_flags_request {
  42. struct list_head node;
  43. s32 flags; /* Do not change to 64 bit */
  44. };
  45. enum dev_pm_qos_req_type {
  46. DEV_PM_QOS_RESUME_LATENCY = 1,
  47. DEV_PM_QOS_LATENCY_TOLERANCE,
  48. DEV_PM_QOS_FLAGS,
  49. };
  50. struct dev_pm_qos_request {
  51. enum dev_pm_qos_req_type type;
  52. union {
  53. struct plist_node pnode;
  54. struct pm_qos_flags_request flr;
  55. } data;
  56. struct device *dev;
  57. };
  58. enum pm_qos_type {
  59. PM_QOS_UNITIALIZED,
  60. PM_QOS_MAX, /* return the largest value */
  61. PM_QOS_MIN, /* return the smallest value */
  62. PM_QOS_SUM /* return the sum */
  63. };
  64. /*
  65. * Note: The lockless read path depends on the CPU accessing target_value
  66. * or effective_flags atomically. Atomic access is only guaranteed on all CPU
  67. * types linux supports for 32 bit quantites
  68. */
  69. struct pm_qos_constraints {
  70. struct plist_head list;
  71. s32 target_value; /* Do not change to 64 bit */
  72. s32 default_value;
  73. s32 no_constraint_value;
  74. enum pm_qos_type type;
  75. struct blocking_notifier_head *notifiers;
  76. };
  77. struct pm_qos_flags {
  78. struct list_head list;
  79. s32 effective_flags; /* Do not change to 64 bit */
  80. };
  81. struct dev_pm_qos {
  82. struct pm_qos_constraints resume_latency;
  83. struct pm_qos_constraints latency_tolerance;
  84. struct pm_qos_flags flags;
  85. struct dev_pm_qos_request *resume_latency_req;
  86. struct dev_pm_qos_request *latency_tolerance_req;
  87. struct dev_pm_qos_request *flags_req;
  88. };
  89. /* Action requested to pm_qos_update_target */
  90. enum pm_qos_req_action {
  91. PM_QOS_ADD_REQ, /* Add a new request */
  92. PM_QOS_UPDATE_REQ, /* Update an existing request */
  93. PM_QOS_REMOVE_REQ /* Remove an existing request */
  94. };
  95. static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
  96. {
  97. return req->dev != NULL;
  98. }
  99. int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
  100. enum pm_qos_req_action action, int value);
  101. bool pm_qos_update_flags(struct pm_qos_flags *pqf,
  102. struct pm_qos_flags_request *req,
  103. enum pm_qos_req_action action, s32 val);
  104. void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
  105. s32 value);
  106. void pm_qos_update_request(struct pm_qos_request *req,
  107. s32 new_value);
  108. void pm_qos_update_request_timeout(struct pm_qos_request *req,
  109. s32 new_value, unsigned long timeout_us);
  110. void pm_qos_remove_request(struct pm_qos_request *req);
  111. int pm_qos_request(int pm_qos_class);
  112. int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
  113. int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
  114. int pm_qos_request_active(struct pm_qos_request *req);
  115. s32 pm_qos_read_value(struct pm_qos_constraints *c);
  116. #ifdef CONFIG_PM
  117. enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
  118. enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
  119. s32 __dev_pm_qos_read_value(struct device *dev);
  120. s32 dev_pm_qos_read_value(struct device *dev);
  121. int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
  122. enum dev_pm_qos_req_type type, s32 value);
  123. int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
  124. int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
  125. int dev_pm_qos_add_notifier(struct device *dev,
  126. struct notifier_block *notifier);
  127. int dev_pm_qos_remove_notifier(struct device *dev,
  128. struct notifier_block *notifier);
  129. void dev_pm_qos_constraints_init(struct device *dev);
  130. void dev_pm_qos_constraints_destroy(struct device *dev);
  131. int dev_pm_qos_add_ancestor_request(struct device *dev,
  132. struct dev_pm_qos_request *req,
  133. enum dev_pm_qos_req_type type, s32 value);
  134. int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
  135. void dev_pm_qos_hide_latency_limit(struct device *dev);
  136. int dev_pm_qos_expose_flags(struct device *dev, s32 value);
  137. void dev_pm_qos_hide_flags(struct device *dev);
  138. int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
  139. s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
  140. int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
  141. int dev_pm_qos_expose_latency_tolerance(struct device *dev);
  142. void dev_pm_qos_hide_latency_tolerance(struct device *dev);
  143. static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
  144. {
  145. return dev->power.qos->resume_latency_req->data.pnode.prio;
  146. }
  147. static inline s32 dev_pm_qos_requested_flags(struct device *dev)
  148. {
  149. return dev->power.qos->flags_req->data.flr.flags;
  150. }
  151. static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
  152. {
  153. return IS_ERR_OR_NULL(dev->power.qos) ?
  154. 0 : pm_qos_read_value(&dev->power.qos->resume_latency);
  155. }
  156. #else
  157. static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
  158. s32 mask)
  159. { return PM_QOS_FLAGS_UNDEFINED; }
  160. static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
  161. s32 mask)
  162. { return PM_QOS_FLAGS_UNDEFINED; }
  163. static inline s32 __dev_pm_qos_read_value(struct device *dev)
  164. { return 0; }
  165. static inline s32 dev_pm_qos_read_value(struct device *dev)
  166. { return 0; }
  167. static inline int dev_pm_qos_add_request(struct device *dev,
  168. struct dev_pm_qos_request *req,
  169. enum dev_pm_qos_req_type type,
  170. s32 value)
  171. { return 0; }
  172. static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
  173. s32 new_value)
  174. { return 0; }
  175. static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
  176. { return 0; }
  177. static inline int dev_pm_qos_add_notifier(struct device *dev,
  178. struct notifier_block *notifier)
  179. { return 0; }
  180. static inline int dev_pm_qos_remove_notifier(struct device *dev,
  181. struct notifier_block *notifier)
  182. { return 0; }
  183. static inline void dev_pm_qos_constraints_init(struct device *dev)
  184. {
  185. dev->power.power_state = PMSG_ON;
  186. }
  187. static inline void dev_pm_qos_constraints_destroy(struct device *dev)
  188. {
  189. dev->power.power_state = PMSG_INVALID;
  190. }
  191. static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
  192. struct dev_pm_qos_request *req,
  193. enum dev_pm_qos_req_type type,
  194. s32 value)
  195. { return 0; }
  196. static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
  197. { return 0; }
  198. static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
  199. static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
  200. { return 0; }
  201. static inline void dev_pm_qos_hide_flags(struct device *dev) {}
  202. static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
  203. { return 0; }
  204. static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
  205. { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
  206. static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
  207. { return 0; }
  208. static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
  209. { return 0; }
  210. static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
  211. static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
  212. static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
  213. static inline s32 dev_pm_qos_raw_read_value(struct device *dev) { return 0; }
#endif	/* CONFIG_PM */
#endif	/* _LINUX_PM_QOS_H */