trace_irqsoff.c

/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
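/*
 * Usage (via tracefs, e.g. /sys/kernel/tracing):
 *
 *	echo irqsoff > current_tracer	# or preemptoff / preemptirqsoff
 *	cat tracing_max_latency		# worst-case latency recorded so far
 *	echo 0 > tracing_max_latency	# re-arm the search for a new maximum
 *
 * Shared state below: irqsoff_trace is the trace_array the active tracer
 * writes into, tracer_enabled gates all of the hooks, tracing_cpu marks
 * the CPU that is currently timing a critical section, and max_trace_lock
 * serializes updates of the max-latency snapshot.
 */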
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
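/*
 * Switching the display-graph option changes what the buffer contains,
 * so the tracer is stopped, the per-CPU tracing flags and the recorded
 * max latency are cleared, and the tracer is restarted in the requested
 * mode.
 */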
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}
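/*
 * Function-graph entry/return hooks: func_prolog_dec() ensures events
 * are recorded only while this CPU is timing a critical section, and
 * the per-CPU 'disabled' counter guards against nested recording.
 */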
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
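/*
 * A critical section has just ended: compute how long irqs/preemption
 * were disabled and, if it beats the current maximum (or exceeds
 * tracing_thresh), record the snapshot via update_max_tr_single() under
 * max_trace_lock. The max_sequence check discards a measurement that
 * was disturbed by another section recording its own maximum.
 */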
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
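/*
 * Called when irqs (or preemption) are first disabled on this CPU:
 * stamp preempt_timestamp, record where the section began, and mark
 * tracing_cpu so the function hooks start recording.
 */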
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
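/*
 * Called when irqs (or preemption) are re-enabled: clear tracing_cpu
 * and hand the just-finished section to check_critical_timing() to see
 * whether it is a new maximum.
 */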
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
/* start and stop critical timings: used to exclude known stoppages (e.g. idle) from the measurement */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
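/*
 * With lockdep (CONFIG_PROVE_LOCKING) enabled, lockdep owns the
 * trace_hardirqs_*() entry points and calls time_hardirqs_on/off() from
 * there, so the tracer hooks in through these two functions instead of
 * the tracer_hardirqs_*() helpers below.
 */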
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * We are only interested in hardirq on/off events:
 */
static inline void tracer_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

static inline void tracer_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}

static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;
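/*
 * Hook up either the function-graph callbacks or the plain function
 * callback, depending on whether graph display is requested. This is
 * only done when the "function" trace option is (or is about to be)
 * set, so plain irqsoff timing runs without the per-function overhead.
 */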
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));

	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */
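/*
 * Runtime changes to trace options are routed here while one of these
 * tracers is active: toggling "function" adds/removes the function
 * hooks, toggling "display-graph" restarts the tracer in graph mode,
 * and everything else falls through to trace_keep_overwrite().
 */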
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;
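/*
 * Common init for the irqsoff/preemptoff/preemptirqsoff tracers. Only
 * one of them may be active at a time (irqsoff_busy). Ring-buffer
 * overwrite and the latency format are forced on because the latency
 * tracers depend on them, and the previous max latency is discarded.
 */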
static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
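/*
 * The three tracers below share all of the machinery above; their init
 * functions differ only in which disable events (trace_type) are timed:
 * irqs off, preemption off, or either one.
 */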
#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */

#ifndef CONFIG_IRQSOFF_TRACER
static inline void tracer_hardirqs_on(void) { }
static inline void tracer_hardirqs_off(void) { }
static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
#endif

#ifndef CONFIG_PREEMPT_TRACER
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);
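/*
 * These are the trace_hardirqs_*() entry points used when lockdep is
 * not handling them. They fire the irq_enable/irq_disable tracepoints
 * and forward to the latency tracer, using tracing_irq_cpu to suppress
 * redundant calls while interrupts are already marked off.
 */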
void trace_hardirqs_on(void)
{
	if (!this_cpu_read(tracing_irq_cpu))
		return;

	trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	tracer_hardirqs_on();

	this_cpu_write(tracing_irq_cpu, 0);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (this_cpu_read(tracing_irq_cpu))
		return;

	this_cpu_write(tracing_irq_cpu, 1);

	trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	tracer_hardirqs_off();
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!this_cpu_read(tracing_irq_cpu))
		return;

	trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
	tracer_hardirqs_on_caller(caller_addr);

	this_cpu_write(tracing_irq_cpu, 0);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (this_cpu_read(tracing_irq_cpu))
		return;

	this_cpu_write(tracing_irq_cpu, 1);

	trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
	tracer_hardirqs_off_caller(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

#if defined(CONFIG_PREEMPT_TRACER) || \
	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
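/*
 * Preempt enable/disable hooks: fire the preempt_enable/preempt_disable
 * tracepoints and forward to the preemptoff latency tracer.
 */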
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace_preempt_enable_rcuidle(a0, a1);
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace_preempt_disable_rcuidle(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif