/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
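
/*
 * Usage sketch (assuming the tracing files live under
 * /sys/kernel/debug/tracing, the usual debugfs mount point):
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *	# echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
 *	# ... run an RT workload ...
 *	# cat /sys/kernel/debug/tracing/tracing_max_latency
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * tracing_max_latency then holds the worst-case wakeup latency seen,
 * and the trace file holds the snapshot recorded for it.
 */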
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static int			wakeup_dl;
static int			tracing_dl = 0;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static int save_flags;
static bool function_enabled;

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

#ifdef CONFIG_FUNCTION_TRACER

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
#endif /* CONFIG_FUNCTION_TRACER */
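
/*
 * Pick the tracing engine behind the latency measurement: either the
 * plain function tracer above or the function graph tracer, depending
 * on the display-graph option.
 */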
static int register_wakeup_function(int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(&trace_ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(&trace_ops);

	function_enabled = false;
}

static void wakeup_function_set(int set)
{
	if (set)
		register_wakeup_function(is_graph(), 1);
	else
		unregister_wakeup_function(is_graph());
}

static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set)
{
	if (mask & TRACE_ITER_FUNCTION)
		wakeup_function_set(set);

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(int graph)
{
	int ret;

	ret = register_wakeup_function(graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(graph);
}
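
/*
 * Flipping the display-graph option swaps the tracing engine, so a
 * latency recorded with the old engine no longer matches the new
 * output format; wakeup_set_flag() below therefore resets the trace
 * and the max latency before restarting.
 */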
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
{
	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_func_tracer(!set);

	wakeup_reset(wakeup_trace);
	tracing_max_latency = 0;

	return start_func_tracer(set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
	return;
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
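
/*
 * Two policies, selected by tracing_thresh. For example: with
 * tracing_thresh == 0 (the default), a latency is only recorded when
 * it beats the current tracing_max_latency, so the buffer always
 * holds the worst case seen. With a nonzero tracing_thresh, every
 * latency at or above the threshold is recorded instead.
 */

/*
 * The task we follow may migrate to another CPU before it is finally
 * scheduled in; track that so the per-cpu check in
 * func_prolog_preempt_disable() keeps following the right CPU.
 */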
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}
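
/*
 * Fires on every context switch. When the task being followed is
 * finally switched in, compute how long it waited since probe_wakeup()
 * stamped preempt_timestamp, and record a new max trace if
 * report_latency() accepts the delta.
 */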
static void notrace
probe_wakeup_sched_switch(void *ignore,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
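
/*
 * Drop the reference on the followed task and clear the tracking
 * state. Callers must hold wakeup_lock with interrupts disabled;
 * wakeup_reset() below takes care of that for the unlocked paths.
 */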
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
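
/*
 * Fires on sched_wakeup and sched_wakeup_new. Decides whether the
 * woken task becomes the task we follow (it must fit the current
 * tracer's scheduling-class rules and, unless it is a deadline task,
 * outrank both the task already being followed and current), then
 * stamps preempt_timestamp as the start of the measurement.
 */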
static void
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Semantic is like this:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rt class;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
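
/*
 * Attach the probes to the scheduler tracepoints and start the
 * function (or function graph) tracing engine. If registering the
 * wakeup_new or sched_switch probe fails, the probes registered
 * before it are unregistered again.
 */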
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(is_graph()))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(is_graph());
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}
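
/*
 * The three init functions above differ only in the wakeup_rt/wakeup_dl
 * flags, which select how picky probe_wakeup() is about scheduling
 * classes: "wakeup" follows any task, "wakeup_rt" only rt and deadline
 * tasks, "wakeup_dl" only deadline tasks.
 */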

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};
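
/*
 * All three tracers set use_max_tr, so recorded traces go through the
 * max-latency snapshot buffer: the trace file shows the events leading
 * up to the worst wakeup latency seen since the last reset.
 */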

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);