trace_irqsoff.c

// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF    = (1 << 1),
        TRACER_PREEMPT_OFF = (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif
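
/*
 * preempt_trace() and irq_trace() gate which events are measured.
 * trace_type is set by the tracer ->init() callbacks further down:
 * "irqsoff" sets TRACER_IRQS_OFF, "preemptoff" sets TRACER_PREEMPT_OFF,
 * and "preemptirqsoff" sets both, so either condition opens a timing
 * window.
 */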

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *           incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *           is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /*
         * Slight chance to get a false positive on tracing_cpu,
         * although I'm starting to think there isn't a chance.
         * Leave this for now just to be paranoid.
         */
        if (!irqs_disabled_flags(*flags) && !preempt_count())
                return 0;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}
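
/*
 * The per-cpu ->disabled counter doubles as a recursion guard: only the
 * outermost caller on a CPU (the one that saw the count reach 1) may
 * record events, and it must pair this call with atomic_dec() on
 * ->disabled, as irqsoff_tracer_call() and the graph callbacks below do.
 */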

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
        int cpu;

        if (!(is_graph(tr) ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}
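
/*
 * Flipping the display-graph option switches between plain function
 * entries and function-graph entries, so the tracer is restarted here and
 * both the per-cpu tracing_cpu flags and the recorded max latency are
 * reset; keeping the old snapshot would mix the two event formats.
 */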

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (ftrace_graph_ignore_func(trace))
                return 0;
        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions. But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        struct trace_array *tr = irqsoff_trace;

        if (is_graph(tr))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}
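
/*
 * Two reporting modes follow from the check above: with tracing_thresh
 * set (written in microseconds through the tracing_thresh file), every
 * section at least that long is reported; with it left at 0, only a new
 * per-instance maximum is. As a sketch (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *      echo 100 > /sys/kernel/tracing/tracing_thresh
 *      echo irqsoff > /sys/kernel/tracing/current_tracer
 *
 * would record every irqs-off section of 100 microseconds or more.
 */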

static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        u64 T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
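
/*
 * report_latency() is checked twice on purpose: once locklessly as a fast
 * path, and again under max_trace_lock in case another CPU recorded a
 * larger maximum in the meantime. The trailing writes after the out label
 * re-arm this CPU for the next measurement window.
 */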

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, pc);

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, pc);
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}
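
/*
 * start/stop_critical_timing() are strictly per CPU: the tracing_cpu flag
 * makes the first disable on a CPU open the timing window and the
 * matching outermost enable close it, so nested disables inside the
 * window are not measured separately.
 */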

/* start and stop critical timings, used to suspend timing over stoppage (in idle) */
void start_critical_timings(void)
{
        int pc = preempt_count();

        if (preempt_trace(pc) || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        int pc = preempt_count();

        if (preempt_trace(pc) || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
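
/*
 * The exported pair above lets code that legitimately sits with interrupts
 * off, such as the idle loop in kernel/sched/idle.c, suspend the
 * measurement around the idle call so that time spent halted is not
 * reported as an irqs-off latency.
 */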

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph();
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_irqsoff_function(tr, is_graph(tr), 1);
        else
                unregister_irqsoff_function(tr, is_graph(tr));
        return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */
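
/*
 * The helpers above are driven by the "function-trace" trace option
 * (TRACE_ITER_FUNCTION) and, via irqsoff_flag_changed() below, by the
 * display-graph option: when function tracing is enabled for the
 * instance, every function entry is recorded between the critical-section
 * markers, which makes the latency trace far more detailed at the cost of
 * overhead.
 */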

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (irqsoff_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return irqsoff_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = tr->trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only toplevel instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph(tr))))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}
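
/*
 * __irqsoff_tracer_init() forces ring-buffer overwrite and the latency
 * output format on while one of these tracers is active, saving the
 * caller's flags so __irqsoff_tracer_reset() can restore them; only one
 * tracer of this family can own the machinery at a time (irqsoff_busy).
 */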

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_irqsoff_tracer(tr, is_graph(tr));

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
        unsigned int pc = preempt_count();

        if (!preempt_trace(pc) && irq_trace())
                stop_critical_timing(a0, a1, pc);
}

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
        unsigned int pc = preempt_count();

        if (!preempt_trace(pc) && irq_trace())
                start_critical_timing(a0, a1, pc);
}
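
/*
 * tracer_hardirqs_on()/off() are the hooks run when interrupts are
 * enabled or disabled (in this era of the kernel they are wired up from
 * the preemptirq tracing glue, see <trace/events/preemptirq.h> included
 * above): an irqs-off window therefore starts at local_irq_disable() time
 * and is evaluated when the matching local_irq_enable() runs.
 */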

static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif /* CONFIG_IRQSOFF_TRACER */
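
/*
 * Typical use of the tracer registered above, as a sketch (assuming
 * tracefs is mounted at /sys/kernel/tracing; see
 * Documentation/trace/ftrace.rst):
 *
 *      cd /sys/kernel/tracing
 *      echo 0 > tracing_max_latency
 *      echo irqsoff > current_tracer
 *      echo 1 > tracing_on
 *      ... run the workload ...
 *      echo 0 > tracing_on
 *      cat tracing_max_latency
 *      cat trace
 *
 * Because of use_max_tr/print_max above, "trace" then holds the snapshot
 * of the longest irqs-off section seen.
 */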

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
        int pc = preempt_count();

        if (preempt_trace(pc) && !irq_trace())
                stop_critical_timing(a0, a1, pc);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
        int pc = preempt_count();

        if (preempt_trace(pc) && !irq_trace())
                start_critical_timing(a0, a1, pc);
}
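
/*
 * The preempt_trace(pc) && !irq_trace() check matters mainly for the
 * combined preemptirqsoff tracer: while interrupts are disabled the
 * irqs-off timing is already running, so preempt-count transitions inside
 * that window are ignored here rather than counted twice. For the plain
 * "preemptoff" tracer irq_trace() is always false, since trace_type does
 * not include TRACER_IRQS_OFF.
 */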

static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = preemptoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = preemptirqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
        register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
        register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
        register_tracer(&preemptirqsoff_tracer);
#endif

        return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */