trace_stack.c

/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"
#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
        { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
        .max_entries    = STACK_TRACE_ENTRIES - 1,
        .entries        = &stack_dump_trace[0],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
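
/*
 * Dump the recorded maximum stack trace to the console with pr_emerg().
 * Used below when the end of the task's stack is found to be corrupted.
 */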
static inline void print_max_stack(void)
{
        long i;
        int size;

        pr_emerg("        Depth    Size   Location    (%d entries)\n"
                 "        -----    ----   --------\n",
                 max_stack_trace.nr_entries);

        for (i = 0; i < max_stack_trace.nr_entries; i++) {
                if (stack_dump_trace[i] == ULONG_MAX)
                        break;
                if (i+1 == max_stack_trace.nr_entries ||
                    stack_dump_trace[i+1] == ULONG_MAX)
                        size = stack_dump_index[i];
                else
                        size = stack_dump_index[i] - stack_dump_index[i+1];

                pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
                         size, (void *)stack_dump_trace[i]);
        }
}
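
/*
 * Measure how much of the current thread stack is in use.  If this is a
 * new maximum, take max_stack_lock, save the backtrace with
 * save_stack_trace() and record, for each entry, how deep the stack was
 * at that frame (stack_dump_index) so per-function frame sizes can be
 * derived later.  The stack tracer's own frame size is measured once and
 * subtracted so that it does not inflate the reported maximum.
 */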
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        static int tracer_frame;
        int frame_size = ACCESS_ONCE(tracer_frame);
        int i, x;

        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;
        /* Remove the frame of the tracer */
        this_size -= frame_size;

        if (this_size <= max_stack_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(stack))
                return;

        local_irq_save(flags);
        arch_spin_lock(&max_stack_lock);

        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;

        /* a race could have already updated it */
        if (this_size <= max_stack_size)
                goto out;

        max_stack_size = this_size;

        max_stack_trace.nr_entries = 0;
        max_stack_trace.skip = 3;

        save_stack_trace(&max_stack_trace);

        /* Skip over the overhead of the stack tracer itself */
        for (i = 0; i < max_stack_trace.nr_entries; i++) {
                if (stack_dump_trace[i] == ip)
                        break;
        }

        /*
         * Now find where in the stack these are.
         */
        x = 0;
        start = stack;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. One of the entries may
         * for some reason be missed on the stack, so we may
         * have to account for them. If they are all there, this
         * loop will only happen once. This code only takes place
         * on a new max, so it is far from a fast path.
         */
        while (i < max_stack_trace.nr_entries) {
                int found = 0;

                stack_dump_index[x] = this_size;
                p = start;

                for (; p < top && i < max_stack_trace.nr_entries; p++) {
                        if (stack_dump_trace[i] == ULONG_MAX)
                                break;
                        if (*p == stack_dump_trace[i]) {
                                stack_dump_trace[x] = stack_dump_trace[i++];
                                this_size = stack_dump_index[x++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                                /*
                                 * We do not want to show the overhead
                                 * of the stack tracer stack in the
                                 * max stack. If we haven't figured
                                 * out what that is, then figure it out
                                 * now.
                                 */
                                if (unlikely(!tracer_frame)) {
                                        tracer_frame = (p - stack) *
                                                sizeof(unsigned long);
                                        max_stack_size -= tracer_frame;
                                }
                        }
                }

                if (!found)
                        i++;
        }

        max_stack_trace.nr_entries = x;
        for (; x < i; x++)
                stack_dump_trace[x] = ULONG_MAX;

        if (task_stack_end_corrupted(current)) {
                print_max_stack();
                BUG();
        }

 out:
        arch_spin_unlock(&max_stack_lock);
        local_irq_restore(flags);
}
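
/*
 * The ftrace callback, invoked at the start of every traced function.
 * The per-cpu trace_active counter prevents recursion: if this CPU is
 * already inside the stack tracer, bail out.  The ip is advanced past
 * the mcount/fentry call site so it matches the return address that
 * save_stack_trace() records for the traced function.
 */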
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        unsigned long stack;
        int cpu;

        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        /* no atomic needed, we only modify this variable by this cpu */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        ip += MCOUNT_INSN_SIZE;

        check_stack(ip, &stack);

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
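
/*
 * Handlers for the 'stack_max_size' file in the tracing directory:
 * reading reports the recorded maximum stack usage, writing (typically 0)
 * resets it.  The writer bumps trace_active so that check_stack() cannot
 * try to take max_stack_lock on this CPU while we already hold it.
 */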
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;
        int cpu;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * In case we trace inside arch_spin_lock() or after (NMI),
         * we will cause circular lock, so we also need to increase
         * the percpu trace_active here.
         */
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);
        *ptr = val;
        arch_spin_unlock(&max_stack_lock);

        per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);

        return count;
}

static const struct file_operations stack_max_size_fops = {
        .open = tracing_open_generic,
        .read = stack_max_size_read,
        .write = stack_max_size_write,
        .llseek = default_llseek,
};
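
/*
 * seq_file iterator for the 'stack_trace' file.  t_start() disables
 * interrupts, bumps the per-cpu trace_active counter and takes
 * max_stack_lock so the recorded table cannot change while it is being
 * printed; t_stop() undoes all of this.
 */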
static void *
__next(struct seq_file *m, loff_t *pos)
{
        long n = *pos - 1;

        if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

        m->private = (void *)n;
        return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        int cpu;

        local_irq_disable();

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
        int cpu;

        arch_spin_unlock(&max_stack_lock);

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)--;

        local_irq_enable();
}
static void trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "#  Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}
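
/*
 * Print one line of the 'stack_trace' file: the entry index, the stack
 * depth recorded at that frame, the frame's size (the difference to the
 * next entry's depth) and the symbol at that address.
 */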
static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           max_stack_trace.nr_entries);

                if (!stack_tracer_enabled && !max_stack_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == max_stack_trace.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_dump_index[i];
        else
                size = stack_dump_index[i] - stack_dump_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open = stack_trace_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
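
/*
 * 'stack_trace_filter' reuses the ftrace regex filter code to restrict
 * which functions trace_ops (and therefore the stack tracer) hooks into.
 */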
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
                                 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = tracing_lseek,
        .release = ftrace_regex_release,
};
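
/*
 * Handler for the kernel.stack_tracer_enabled sysctl.  Registers or
 * unregisters the ftrace callback only when the value actually changes.
 */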
int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}
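
/*
 * __setup() matches by prefix, so this handles both "stacktrace" and
 * "stacktrace_filter=<function-list>" on the kernel command line; in the
 * latter case str starts with "_filter=".  The filter string is stashed
 * until stack_trace_init() can apply it.
 */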
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
        if (strncmp(str, "_filter=", 8) == 0)
                strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);
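
/*
 * Create the stack tracer's files in the tracing directory and, if the
 * tracer was enabled on the command line, apply any early filter and
 * register the ftrace callback.
 */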
static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("stack_max_size", 0644, d_tracer,
                          &max_stack_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                          NULL, &stack_trace_fops);

        trace_create_file("stack_trace_filter", 0444, d_tracer,
                          NULL, &stack_trace_filter_fops);

        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);