/* pm_qos.h */
  1. #ifndef _LINUX_PM_QOS_H
  2. #define _LINUX_PM_QOS_H
  3. /* interface for the pm_qos_power infrastructure of the linux kernel.
  4. *
  5. * Mark Gross <mgross@linux.intel.com>
  6. */
  7. #include <linux/plist.h>
  8. #include <linux/notifier.h>
  9. #include <linux/miscdevice.h>
  10. #include <linux/device.h>
  11. #include <linux/workqueue.h>
  12. enum {
  13. PM_QOS_RESERVED = 0,
  14. PM_QOS_CPU_DMA_LATENCY,
  15. PM_QOS_NETWORK_LATENCY,
  16. PM_QOS_NETWORK_THROUGHPUT,
  17. /* insert new class ID */
  18. PM_QOS_NUM_CLASSES,
  19. };
  20. enum pm_qos_flags_status {
  21. PM_QOS_FLAGS_UNDEFINED = -1,
  22. PM_QOS_FLAGS_NONE,
  23. PM_QOS_FLAGS_SOME,
  24. PM_QOS_FLAGS_ALL,
  25. };
  26. #define PM_QOS_DEFAULT_VALUE -1
  27. #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
  28. #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
  29. #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
  30. #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
  31. #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
  32. #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
  33. #define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1))
  34. #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
  35. #define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1)
  36. struct pm_qos_request {
  37. struct plist_node node;
  38. int pm_qos_class;
  39. struct delayed_work work; /* for pm_qos_update_request_timeout */
  40. };
  41. struct pm_qos_flags_request {
  42. struct list_head node;
  43. s32 flags; /* Do not change to 64 bit */
  44. };
  45. enum dev_pm_qos_req_type {
  46. DEV_PM_QOS_RESUME_LATENCY = 1,
  47. DEV_PM_QOS_LATENCY_TOLERANCE,
  48. DEV_PM_QOS_FLAGS,
  49. };
  50. struct dev_pm_qos_request {
  51. enum dev_pm_qos_req_type type;
  52. union {
  53. struct plist_node pnode;
  54. struct pm_qos_flags_request flr;
  55. } data;
  56. struct device *dev;
  57. };
  58. enum pm_qos_type {
  59. PM_QOS_UNITIALIZED,
  60. PM_QOS_MAX, /* return the largest value */
  61. PM_QOS_MIN /* return the smallest value */
  62. };
  63. /*
  64. * Note: The lockless read path depends on the CPU accessing target_value
  65. * or effective_flags atomically. Atomic access is only guaranteed on all CPU
  66. * types linux supports for 32 bit quantites
  67. */
  68. struct pm_qos_constraints {
  69. struct plist_head list;
  70. s32 target_value; /* Do not change to 64 bit */
  71. s32 default_value;
  72. s32 no_constraint_value;
  73. enum pm_qos_type type;
  74. struct blocking_notifier_head *notifiers;
  75. };
  76. struct pm_qos_flags {
  77. struct list_head list;
  78. s32 effective_flags; /* Do not change to 64 bit */
  79. };
  80. struct dev_pm_qos {
  81. struct pm_qos_constraints resume_latency;
  82. struct pm_qos_constraints latency_tolerance;
  83. struct pm_qos_flags flags;
  84. struct dev_pm_qos_request *resume_latency_req;
  85. struct dev_pm_qos_request *latency_tolerance_req;
  86. struct dev_pm_qos_request *flags_req;
  87. };
  88. /* Action requested to pm_qos_update_target */
  89. enum pm_qos_req_action {
  90. PM_QOS_ADD_REQ, /* Add a new request */
  91. PM_QOS_UPDATE_REQ, /* Update an existing request */
  92. PM_QOS_REMOVE_REQ /* Remove an existing request */
  93. };
  94. static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
  95. {
  96. return req->dev != NULL;
  97. }
  98. int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
  99. enum pm_qos_req_action action, int value);
  100. bool pm_qos_update_flags(struct pm_qos_flags *pqf,
  101. struct pm_qos_flags_request *req,
  102. enum pm_qos_req_action action, s32 val);
  103. void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
  104. s32 value);
  105. void pm_qos_update_request(struct pm_qos_request *req,
  106. s32 new_value);
  107. void pm_qos_update_request_timeout(struct pm_qos_request *req,
  108. s32 new_value, unsigned long timeout_us);
  109. void pm_qos_remove_request(struct pm_qos_request *req);
  110. int pm_qos_request(int pm_qos_class);
  111. int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
  112. int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
  113. int pm_qos_request_active(struct pm_qos_request *req);
  114. s32 pm_qos_read_value(struct pm_qos_constraints *c);
  115. #ifdef CONFIG_PM
  116. enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
  117. enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
  118. s32 __dev_pm_qos_read_value(struct device *dev);
  119. s32 dev_pm_qos_read_value(struct device *dev);
  120. int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
  121. enum dev_pm_qos_req_type type, s32 value);
  122. int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
  123. int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
  124. int dev_pm_qos_add_notifier(struct device *dev,
  125. struct notifier_block *notifier);
  126. int dev_pm_qos_remove_notifier(struct device *dev,
  127. struct notifier_block *notifier);
  128. int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
  129. int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
  130. void dev_pm_qos_constraints_init(struct device *dev);
  131. void dev_pm_qos_constraints_destroy(struct device *dev);
  132. int dev_pm_qos_add_ancestor_request(struct device *dev,
  133. struct dev_pm_qos_request *req,
  134. enum dev_pm_qos_req_type type, s32 value);
  135. #else
  136. static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
  137. s32 mask)
  138. { return PM_QOS_FLAGS_UNDEFINED; }
  139. static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
  140. s32 mask)
  141. { return PM_QOS_FLAGS_UNDEFINED; }
  142. static inline s32 __dev_pm_qos_read_value(struct device *dev)
  143. { return 0; }
  144. static inline s32 dev_pm_qos_read_value(struct device *dev)
  145. { return 0; }
  146. static inline int dev_pm_qos_add_request(struct device *dev,
  147. struct dev_pm_qos_request *req,
  148. enum dev_pm_qos_req_type type,
  149. s32 value)
  150. { return 0; }
  151. static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
  152. s32 new_value)
  153. { return 0; }
  154. static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
  155. { return 0; }
  156. static inline int dev_pm_qos_add_notifier(struct device *dev,
  157. struct notifier_block *notifier)
  158. { return 0; }
  159. static inline int dev_pm_qos_remove_notifier(struct device *dev,
  160. struct notifier_block *notifier)
  161. { return 0; }
  162. static inline int dev_pm_qos_add_global_notifier(
  163. struct notifier_block *notifier)
  164. { return 0; }
  165. static inline int dev_pm_qos_remove_global_notifier(
  166. struct notifier_block *notifier)
  167. { return 0; }
  168. static inline void dev_pm_qos_constraints_init(struct device *dev)
  169. {
  170. dev->power.power_state = PMSG_ON;
  171. }
  172. static inline void dev_pm_qos_constraints_destroy(struct device *dev)
  173. {
  174. dev->power.power_state = PMSG_INVALID;
  175. }
  176. static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
  177. struct dev_pm_qos_request *req,
  178. enum dev_pm_qos_req_type type,
  179. s32 value)
  180. { return 0; }
  181. #endif
  182. #ifdef CONFIG_PM_RUNTIME
  183. int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
  184. void dev_pm_qos_hide_latency_limit(struct device *dev);
  185. int dev_pm_qos_expose_flags(struct device *dev, s32 value);
  186. void dev_pm_qos_hide_flags(struct device *dev);
  187. int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
  188. s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
  189. int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
  190. static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
  191. {
  192. return dev->power.qos->resume_latency_req->data.pnode.prio;
  193. }
  194. static inline s32 dev_pm_qos_requested_flags(struct device *dev)
  195. {
  196. return dev->power.qos->flags_req->data.flr.flags;
  197. }
  198. #else
  199. static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
  200. { return 0; }
  201. static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
  202. static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
  203. { return 0; }
  204. static inline void dev_pm_qos_hide_flags(struct device *dev) {}
  205. static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
  206. { return 0; }
  207. static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
  208. { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
  209. static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
  210. { return 0; }
  211. static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
  212. static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
  213. #endif
  214. #endif