trace_irqsoff.c

/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *           incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *           is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(*flags))
                return 0;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
        int cpu;

        if (!(is_graph(tr) ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        struct trace_array *tr = irqsoff_trace;

        if (is_graph(tr))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}
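
/*
 * Called when a critical section ends (irqs/preemption re-enabled).
 * Computes how long the section lasted and, if report_latency() says it
 * is a new maximum (or above tracing_thresh), records the end address
 * and snapshots the trace via update_max_tr_single(). The max_trace_lock
 * and the sequence check guard against a concurrent maximum recorded on
 * another CPU.
 */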
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
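
/*
 * Mark the beginning of an irqs-off/preempt-off critical section on this
 * CPU: record the timestamp and start address, emit a trace entry, and
 * set the per-cpu tracing_cpu flag so the function tracer callbacks know
 * this CPU is inside a measured section.
 */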
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}
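
/*
 * Mark the end of the critical section: clear the per-cpu tracing_cpu
 * flag, emit a final trace entry, and let check_critical_timing() decide
 * whether this section is a new maximum worth recording.
 */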
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to exclude stoppage periods (such as idle) from the measurement */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;
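
/*
 * Hook this tracer into ftrace: register either the function-graph
 * entry/return handlers or the plain per-function callback in tr->ops,
 * depending on 'graph'. Nothing is registered unless the function trace
 * option is enabled (or about to be enabled via 'set').
 */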
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph();
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_irqsoff_function(tr, is_graph(tr), 1);
        else
                unregister_irqsoff_function(tr, is_graph(tr));
        return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (irqsoff_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return irqsoff_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = tr->trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();

        tracing_reset_online_cpus(&tr->trace_buffer);

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only toplevel instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph(tr))))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_irqsoff_tracer(tr, is_graph(tr));

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
        .name            = "irqsoff",
        .init            = irqsoff_tracer_init,
        .reset           = irqsoff_tracer_reset,
        .start           = irqsoff_tracer_start,
        .stop            = irqsoff_tracer_stop,
        .print_max       = true,
        .print_header    = irqsoff_print_header,
        .print_line      = irqsoff_print_line,
        .flag_changed    = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest        = trace_selftest_startup_irqsoff,
#endif
        .open            = irqsoff_trace_open,
        .close           = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr      = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name            = "preemptoff",
        .init            = preemptoff_tracer_init,
        .reset           = irqsoff_tracer_reset,
        .start           = irqsoff_tracer_start,
        .stop            = irqsoff_tracer_stop,
        .print_max       = true,
        .print_header    = irqsoff_print_header,
        .print_line      = irqsoff_print_line,
        .flag_changed    = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest        = trace_selftest_startup_preemptoff,
#endif
        .open            = irqsoff_trace_open,
        .close           = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr      = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name            = "preemptirqsoff",
        .init            = preemptirqsoff_tracer_init,
        .reset           = irqsoff_tracer_reset,
        .start           = irqsoff_tracer_start,
        .stop            = irqsoff_tracer_stop,
        .print_max       = true,
        .print_header    = irqsoff_print_header,
        .print_line      = irqsoff_print_line,
        .flag_changed    = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest        = trace_selftest_startup_preemptirqsoff,
#endif
        .open            = irqsoff_trace_open,
        .close           = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr      = true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
core_initcall(init_irqsoff_tracer);
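
/*
 * Typical usage from user space, for reference (a sketch only; the exact
 * tracefs mount point and available files depend on the kernel
 * configuration -- see Documentation/trace/ftrace.txt):
 *
 *      # cd /sys/kernel/debug/tracing
 *      # echo 0 > options/function-trace      (optional: lower overhead)
 *      # echo irqsoff > current_tracer
 *      # echo 1 > tracing_on
 *      # ... run the workload ...
 *      # echo 0 > tracing_on
 *      # cat tracing_max_latency              (worst latency seen, in usecs)
 *      # cat trace                            (snapshot of that worst case)
 *
 * The preemptoff and preemptirqsoff tracers registered above are used the
 * same way, by echoing their names into current_tracer.
 */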