power.h

#undef TRACE_SYSTEM
#define TRACE_SYSTEM power

#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_POWER_H

#include <linux/ktime.h>
#include <linux/pm_qos.h>
#include <linux/tracepoint.h>
#include <linux/trace_events.h>

#define TPS(x)  tracepoint_string(x)

DECLARE_EVENT_CLASS(cpu,
	TP_PROTO(unsigned int state, unsigned int cpu_id),
	TP_ARGS(state, cpu_id),
	TP_STRUCT__entry(
		__field(u32, state)
		__field(u32, cpu_id)
	),
	TP_fast_assign(
		__entry->state = state;
		__entry->cpu_id = cpu_id;
	),
	TP_printk("state=%lu cpu_id=%lu", (unsigned long)__entry->state,
		  (unsigned long)__entry->cpu_id)
);

DEFINE_EVENT(cpu, cpu_idle,
	TP_PROTO(unsigned int state, unsigned int cpu_id),
	TP_ARGS(state, cpu_id)
);
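
/*
 * Illustrative usage, not part of the original header: a cpuidle driver
 * would typically bracket the idle entry/exit path with cpu_idle events,
 * using PWR_EVENT_EXIT (defined further below) to mark the exit, e.g.:
 *
 *	trace_cpu_idle(target_state_index, smp_processor_id());
 *	... enter the low-power state ...
 *	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 */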

TRACE_EVENT(powernv_throttle,
	TP_PROTO(int chip_id, const char *reason, int pmax),
	TP_ARGS(chip_id, reason, pmax),
	TP_STRUCT__entry(
		__field(int, chip_id)
		__string(reason, reason)
		__field(int, pmax)
	),
	TP_fast_assign(
		__entry->chip_id = chip_id;
		__assign_str(reason, reason);
		__entry->pmax = pmax;
	),
	TP_printk("Chip %d Pmax %d %s", __entry->chip_id,
		  __entry->pmax, __get_str(reason))
);

TRACE_EVENT(pstate_sample,
	TP_PROTO(u32 core_busy,
		u32 scaled_busy,
		u32 from,
		u32 to,
		u64 mperf,
		u64 aperf,
		u64 tsc,
		u32 freq
		),
	TP_ARGS(core_busy,
		scaled_busy,
		from,
		to,
		mperf,
		aperf,
		tsc,
		freq
		),
	TP_STRUCT__entry(
		__field(u32, core_busy)
		__field(u32, scaled_busy)
		__field(u32, from)
		__field(u32, to)
		__field(u64, mperf)
		__field(u64, aperf)
		__field(u64, tsc)
		__field(u32, freq)
		),
	TP_fast_assign(
		__entry->core_busy = core_busy;
		__entry->scaled_busy = scaled_busy;
		__entry->from = from;
		__entry->to = to;
		__entry->mperf = mperf;
		__entry->aperf = aperf;
		__entry->tsc = tsc;
		__entry->freq = freq;
		),
	TP_printk("core_busy=%lu scaled=%lu from=%lu to=%lu mperf=%llu aperf=%llu tsc=%llu freq=%lu ",
		(unsigned long)__entry->core_busy,
		(unsigned long)__entry->scaled_busy,
		(unsigned long)__entry->from,
		(unsigned long)__entry->to,
		(unsigned long long)__entry->mperf,
		(unsigned long long)__entry->aperf,
		(unsigned long long)__entry->tsc,
		(unsigned long)__entry->freq
		)
);

/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */
#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING
#define _PWR_EVENT_AVOID_DOUBLE_DEFINING

#define PWR_EVENT_EXIT -1
#endif

#define pm_verb_symbolic(event) \
	__print_symbolic(event, \
		{ PM_EVENT_SUSPEND, "suspend" }, \
		{ PM_EVENT_RESUME, "resume" }, \
		{ PM_EVENT_FREEZE, "freeze" }, \
		{ PM_EVENT_QUIESCE, "quiesce" }, \
		{ PM_EVENT_HIBERNATE, "hibernate" }, \
		{ PM_EVENT_THAW, "thaw" }, \
		{ PM_EVENT_RESTORE, "restore" }, \
		{ PM_EVENT_RECOVER, "recover" })

DEFINE_EVENT(cpu, cpu_frequency,
	TP_PROTO(unsigned int frequency, unsigned int cpu_id),
	TP_ARGS(frequency, cpu_id)
);
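
/*
 * Illustrative usage, not part of the original header: a cpufreq driver
 * or the cpufreq core would typically emit this event once a frequency
 * transition has completed, e.g.:
 *
 *	trace_cpu_frequency(new_freq_khz, smp_processor_id());
 */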

TRACE_EVENT(device_pm_callback_start,
	TP_PROTO(struct device *dev, const char *pm_ops, int event),
	TP_ARGS(dev, pm_ops, event),
	TP_STRUCT__entry(
		__string(device, dev_name(dev))
		__string(driver, dev_driver_string(dev))
		__string(parent, dev->parent ? dev_name(dev->parent) : "none")
		__string(pm_ops, pm_ops ? pm_ops : "none ")
		__field(int, event)
	),
	TP_fast_assign(
		__assign_str(device, dev_name(dev));
		__assign_str(driver, dev_driver_string(dev));
		__assign_str(parent,
			dev->parent ? dev_name(dev->parent) : "none");
		__assign_str(pm_ops, pm_ops ? pm_ops : "none ");
		__entry->event = event;
	),
	TP_printk("%s %s, parent: %s, %s[%s]", __get_str(driver),
		__get_str(device), __get_str(parent), __get_str(pm_ops),
		pm_verb_symbolic(__entry->event))
);

TRACE_EVENT(device_pm_callback_end,
	TP_PROTO(struct device *dev, int error),
	TP_ARGS(dev, error),
	TP_STRUCT__entry(
		__string(device, dev_name(dev))
		__string(driver, dev_driver_string(dev))
		__field(int, error)
	),
	TP_fast_assign(
		__assign_str(device, dev_name(dev));
		__assign_str(driver, dev_driver_string(dev));
		__entry->error = error;
	),
	TP_printk("%s %s, err=%d",
		__get_str(driver), __get_str(device), __entry->error)
);

TRACE_EVENT(suspend_resume,
	TP_PROTO(const char *action, int val, bool start),
	TP_ARGS(action, val, start),
	TP_STRUCT__entry(
		__field(const char *, action)
		__field(int, val)
		__field(bool, start)
	),
	TP_fast_assign(
		__entry->action = action;
		__entry->val = val;
		__entry->start = start;
	),
	TP_printk("%s[%u] %s", __entry->action, (unsigned int)__entry->val,
		(__entry->start) ? "begin" : "end")
);

DECLARE_EVENT_CLASS(wakeup_source,
	TP_PROTO(const char *name, unsigned int state),
	TP_ARGS(name, state),
	TP_STRUCT__entry(
		__string(name, name)
		__field(u64, state)
	),
	TP_fast_assign(
		__assign_str(name, name);
		__entry->state = state;
	),
	TP_printk("%s state=0x%lx", __get_str(name),
		(unsigned long)__entry->state)
);

DEFINE_EVENT(wakeup_source, wakeup_source_activate,
	TP_PROTO(const char *name, unsigned int state),
	TP_ARGS(name, state)
);

DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,
	TP_PROTO(const char *name, unsigned int state),
	TP_ARGS(name, state)
);

/*
 * The clock events are used for clock enable/disable and for
 * clock rate change.
 */
DECLARE_EVENT_CLASS(clock,
	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
	TP_ARGS(name, state, cpu_id),
	TP_STRUCT__entry(
		__string(name, name)
		__field(u64, state)
		__field(u64, cpu_id)
	),
	TP_fast_assign(
		__assign_str(name, name);
		__entry->state = state;
		__entry->cpu_id = cpu_id;
	),
	TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
		(unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
);

DEFINE_EVENT(clock, clock_enable,
	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
	TP_ARGS(name, state, cpu_id)
);

DEFINE_EVENT(clock, clock_disable,
	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
	TP_ARGS(name, state, cpu_id)
);

DEFINE_EVENT(clock, clock_set_rate,
	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
	TP_ARGS(name, state, cpu_id)
);
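
/*
 * Illustrative usage, not part of the original header: a clock provider
 * would typically pass 1/0 as "state" for enable/disable and the new
 * rate (in Hz) for clock_set_rate, e.g.:
 *
 *	trace_clock_enable(clk_name, 1, smp_processor_id());
 *	trace_clock_set_rate(clk_name, new_rate_hz, smp_processor_id());
 */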

/*
 * The power domain events are used for power domain transitions.
 */
DECLARE_EVENT_CLASS(power_domain,
	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
	TP_ARGS(name, state, cpu_id),
	TP_STRUCT__entry(
		__string(name, name)
		__field(u64, state)
		__field(u64, cpu_id)
	),
	TP_fast_assign(
		__assign_str(name, name);
		__entry->state = state;
		__entry->cpu_id = cpu_id;
	),
	TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
		(unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
);

DEFINE_EVENT(power_domain, power_domain_target,
	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
	TP_ARGS(name, state, cpu_id)
);
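
/*
 * Illustrative usage, not part of the original header: a power domain
 * implementation (e.g. a genpd-based one) might record the state it is
 * about to program for a domain, e.g.:
 *
 *	trace_power_domain_target(domain_name, target_state, smp_processor_id());
 */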

/*
 * The pm qos events are used for pm qos updates.
 */
DECLARE_EVENT_CLASS(pm_qos_request,
	TP_PROTO(int pm_qos_class, s32 value),
	TP_ARGS(pm_qos_class, value),
	TP_STRUCT__entry(
		__field(int, pm_qos_class)
		__field(s32, value)
	),
	TP_fast_assign(
		__entry->pm_qos_class = pm_qos_class;
		__entry->value = value;
	),
	TP_printk("pm_qos_class=%s value=%d",
		__print_symbolic(__entry->pm_qos_class,
			{ PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" },
			{ PM_QOS_NETWORK_LATENCY, "NETWORK_LATENCY" },
			{ PM_QOS_NETWORK_THROUGHPUT, "NETWORK_THROUGHPUT" }),
		__entry->value)
);

DEFINE_EVENT(pm_qos_request, pm_qos_add_request,
	TP_PROTO(int pm_qos_class, s32 value),
	TP_ARGS(pm_qos_class, value)
);

DEFINE_EVENT(pm_qos_request, pm_qos_update_request,
	TP_PROTO(int pm_qos_class, s32 value),
	TP_ARGS(pm_qos_class, value)
);

DEFINE_EVENT(pm_qos_request, pm_qos_remove_request,
	TP_PROTO(int pm_qos_class, s32 value),
	TP_ARGS(pm_qos_class, value)
);
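
/*
 * Illustrative usage, not part of the original header: the PM QoS core
 * would typically emit one of these whenever a class-wide request is
 * added, updated or removed, e.g.:
 *
 *	trace_pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, value);
 *	trace_pm_qos_update_request(PM_QOS_CPU_DMA_LATENCY, new_value);
 *	trace_pm_qos_remove_request(PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 */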

TRACE_EVENT(pm_qos_update_request_timeout,
	TP_PROTO(int pm_qos_class, s32 value, unsigned long timeout_us),
	TP_ARGS(pm_qos_class, value, timeout_us),
	TP_STRUCT__entry(
		__field(int, pm_qos_class)
		__field(s32, value)
		__field(unsigned long, timeout_us)
	),
	TP_fast_assign(
		__entry->pm_qos_class = pm_qos_class;
		__entry->value = value;
		__entry->timeout_us = timeout_us;
	),
	TP_printk("pm_qos_class=%s value=%d, timeout_us=%ld",
		__print_symbolic(__entry->pm_qos_class,
			{ PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" },
			{ PM_QOS_NETWORK_LATENCY, "NETWORK_LATENCY" },
			{ PM_QOS_NETWORK_THROUGHPUT, "NETWORK_THROUGHPUT" }),
		__entry->value, __entry->timeout_us)
);

DECLARE_EVENT_CLASS(pm_qos_update,
	TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),
	TP_ARGS(action, prev_value, curr_value),
	TP_STRUCT__entry(
		__field(enum pm_qos_req_action, action)
		__field(int, prev_value)
		__field(int, curr_value)
	),
	TP_fast_assign(
		__entry->action = action;
		__entry->prev_value = prev_value;
		__entry->curr_value = curr_value;
	),
	TP_printk("action=%s prev_value=%d curr_value=%d",
		__print_symbolic(__entry->action,
			{ PM_QOS_ADD_REQ, "ADD_REQ" },
			{ PM_QOS_UPDATE_REQ, "UPDATE_REQ" },
			{ PM_QOS_REMOVE_REQ, "REMOVE_REQ" }),
		__entry->prev_value, __entry->curr_value)
);

DEFINE_EVENT(pm_qos_update, pm_qos_update_target,
	TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),
	TP_ARGS(action, prev_value, curr_value)
);

DEFINE_EVENT_PRINT(pm_qos_update, pm_qos_update_flags,
	TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),
	TP_ARGS(action, prev_value, curr_value),
	TP_printk("action=%s prev_value=0x%x curr_value=0x%x",
		__print_symbolic(__entry->action,
			{ PM_QOS_ADD_REQ, "ADD_REQ" },
			{ PM_QOS_UPDATE_REQ, "UPDATE_REQ" },
			{ PM_QOS_REMOVE_REQ, "REMOVE_REQ" }),
		__entry->prev_value, __entry->curr_value)
);

DECLARE_EVENT_CLASS(dev_pm_qos_request,
	TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
		 s32 new_value),
	TP_ARGS(name, type, new_value),
	TP_STRUCT__entry(
		__string(name, name)
		__field(enum dev_pm_qos_req_type, type)
		__field(s32, new_value)
	),
	TP_fast_assign(
		__assign_str(name, name);
		__entry->type = type;
		__entry->new_value = new_value;
	),
	TP_printk("device=%s type=%s new_value=%d",
		__get_str(name),
		__print_symbolic(__entry->type,
			{ DEV_PM_QOS_RESUME_LATENCY, "DEV_PM_QOS_RESUME_LATENCY" },
			{ DEV_PM_QOS_FLAGS, "DEV_PM_QOS_FLAGS" }),
		__entry->new_value)
);

DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_add_request,
	TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
		 s32 new_value),
	TP_ARGS(name, type, new_value)
);

DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_update_request,
	TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
		 s32 new_value),
	TP_ARGS(name, type, new_value)
);

DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_remove_request,
	TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
		 s32 new_value),
	TP_ARGS(name, type, new_value)
);

#endif /* _TRACE_POWER_H */

/* This part must be outside protection */
#include <trace/define_trace.h>