trace_stack.c

/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"
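
/*
 * Up to STACK_TRACE_ENTRIES frames are recorded: stack_dump_trace[]
 * holds the deepest (worst-case) stack trace seen so far, and
 * stack_trace_index[] records the stack depth (bytes in use) at each
 * recorded entry.
 */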
#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
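
/*
 * Dump the saved max-stack trace to the console. Each line shows the
 * stack depth remaining at that frame, the size the frame itself
 * consumed, and the function. Called from check_stack() below when the
 * end of the task stack is found corrupted.
 */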
void stack_trace_print(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == stack_trace_max.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}
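
/*
 * check_stack() is handed the traced function's ip and a pointer taken
 * from the current stack by the stack_trace_call() ftrace callback
 * below. If more of the thread stack is in use than any previously
 * recorded maximum, the new worst case is saved into stack_dump_trace[]
 * and stack_trace_index[] along with its total size.
 */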
/*
 * When arch-specific code overrides this function, the following
 * data should be filled up, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_max.nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			if (*p == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	stack_trace_max.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		stack_trace_print();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}
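
/*
 * ftrace callback, registered through trace_ops below. It runs at every
 * traced function entry; the per-cpu trace_active counter keeps the
 * callback from recursing into itself while check_stack() runs.
 */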
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
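
/*
 * Handlers for the tracefs "stack_max_size" file: reading reports the
 * largest stack usage recorded so far, writing sets (typically resets)
 * that value under stack_trace_max_lock.
 */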
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);

	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
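
/*
 * seq_file iterator for the tracefs "stack_trace" file. t_start() takes
 * stack_trace_max_lock (with the per-cpu trace_active count bumped so
 * the tracer does not recurse on that lock), and t_show() prints one
 * entry of the saved max-stack trace per line.
 */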
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&stack_trace_max_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}
static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
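
/*
 * The "stack_trace_filter" file reuses the generic ftrace filter
 * interface to limit which functions the stack tracer callback is
 * attached to.
 */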
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
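
/*
 * sysctl handler for /proc/sys/kernel/stack_tracer_enabled: registers
 * or unregisters the ftrace callback only when the value actually
 * changes.
 */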
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
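
/*
 * Boot-time setup: "stacktrace" on the kernel command line enables the
 * tracer early, and "stacktrace_filter=" saves a filter string that
 * stack_trace_init() applies before registering the callback.
 */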
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);