trace_selftest.c

/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>
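
/*
 * Only function (TRACE_FN) and context-switch (TRACE_CTX) entries
 * are expected in these self-test buffers; anything else is
 * treated as corruption.
 */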
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
		return 1;
	}
	return 0;
}
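
/*
 * Walk one CPU's list of trace pages and verify that every
 * recorded entry has a valid type and that the page list is
 * consistent with the number of entries.
 */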
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long cnt = 0;
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* make sure functions have been recorded */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
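/*
 * Verification test of the irqsoff tracer: disable interrupts
 * briefly with tracing enabled, then check that both the live
 * and the max-latency trace buffers are sane and non-empty.
 */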
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
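/*
 * Verification test of the preemptoff tracer: disable preemption
 * briefly with tracing enabled, then check that both the live
 * and the max-latency trace buffers are sane and non-empty.
 */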
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
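/*
 * Combined test of the preemptirqsoff tracer: run the
 * preempt-off-plus-irq-off sequence twice, re-enabling in
 * opposite orders, and verify both trace buffers after each pass.
 */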
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	struct completion *x = data;

	/* Make this an RT thread; it doesn't need to be too high */
	rt_mutex_setprio(current, MAX_RT_PRIO - 5);

	/* Let the test know we now have the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
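
/*
 * Verification test of the wakeup-latency tracer: wake an RT
 * kthread with tracing enabled and check that the wakeup was
 * recorded in both the live and the max-latency trace buffers.
 */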
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */
	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
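/*
 * Verification test of the context-switch tracer: trace for
 * 1/10 of a second, then check that the buffer is sane and
 * that some scheduling activity was recorded.
 */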
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
#endif /* CONFIG_DYNAMIC_FTRACE */