trace_stack.c

/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/magic.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif
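
/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far, terminated by ULONG_MAX sentinels.  For each entry,
 * stack_dump_index[] records how many bytes of stack lie between that
 * entry's location and the top (highest address) of the stack area;
 * this is the "Depth" column shown in the stack_trace file.
 */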
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
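
/*
 * Dump the recorded worst-case stack to the console with pr_emerg().
 * The Size column for entry i is the difference between consecutive
 * depths, i.e. the stack consumed by that frame alone.  Called only
 * from check_stack() when stack-end corruption is detected, just
 * before BUG().
 */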
static inline void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			"        -----    ----   --------\n",
			max_stack_trace.nr_entries - 1);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
				size, (void *)stack_dump_trace[i]);
	}
}
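
/*
 * check_stack() computes current stack usage from the address of a
 * local variable: kernel stacks here are THREAD_SIZE-aligned, so the
 * low bits of the address give the offset into the stack area, and
 * THREAD_SIZE - offset is the number of bytes in use.  For example,
 * assuming THREAD_SIZE is 8192 and the local sits at 0x...5e38, the
 * offset is 0x1e38 (7736) and this_size is 8192 - 7736 = 456 bytes.
 *
 * On a new maximum it saves a stack trace, then walks the raw stack
 * words looking for each saved return address in order, recording its
 * depth from the top of the stack in stack_dump_index[].
 */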
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;

	if (using_ftrace_ops_list_func())
		max_stack_trace.skip = 4;
	else
		max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	if ((current != &init_task &&
		*(end_of_stack(current)) != STACK_END_MAGIC)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
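
/*
 * stack_trace_call() is the ftrace callback invoked at every traced
 * function entry.  The per-cpu trace_active counter keeps the tracer
 * from recursing into itself: only the outermost invocation on a CPU
 * (counter was zero) actually checks the stack.
 */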
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
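
/*
 * The ftrace_ops that hooks stack_trace_call() into the function
 * tracer; registered and unregistered by stack_trace_sysctl() and
 * stack_trace_init() below.
 */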
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
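
/*
 * debugfs handlers for the stack_max_size file.  Reading reports the
 * deepest stack seen so far; writing (usually 0) resets the watermark
 * so a new maximum can be recorded, e.g.:
 *
 *	# echo 0 > /sys/kernel/debug/tracing/stack_max_size
 *	# cat /sys/kernel/debug/tracing/stack_max_size
 */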
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
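
/*
 * seq_file iterator for the stack_trace file.  Positions are 1-based:
 * *pos == 0 emits the header (SEQ_START_TOKEN) and position n maps to
 * entry n - 1 of stack_dump_trace[].  t_start()/t_stop() take the same
 * irq-off + trace_active + max_stack_lock sequence as the writers so
 * the snapshot cannot change while it is being read.
 */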
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
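
/*
 * t_show() prints one row of the stack_trace file.  Depth is the
 * recorded distance from the top of the stack; Size is the difference
 * between this entry's depth and the next one's, i.e. the stack used
 * by this function's frame alone.
 */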
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
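
/*
 * The stack_trace_filter file reuses the ftrace regex filter machinery
 * to restrict which functions the stack tracer hooks, e.g.:
 *
 *	# echo 'tcp_*' > /sys/kernel/debug/tracing/stack_trace_filter
 */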
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
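
/*
 * proc handler for /proc/sys/kernel/stack_tracer_enabled.  On a
 * 0 <-> 1 transition it registers or unregisters trace_ops with the
 * function tracer; rewriting the current value is a no-op thanks to
 * the last_stack_tracer_enabled check.
 */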
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
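
/*
 * Boot-time setup.  __setup("stacktrace", ...) hands enable_stacktrace()
 * whatever follows "stacktrace" on the command line, so
 * "stacktrace_filter=<globs>" arrives as str == "_filter=<globs>" and
 * the filter string is stashed until stack_trace_init() can apply it.
 */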
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
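
/*
 * Create the control files under the tracing debugfs directory and,
 * if "stacktrace" was given on the command line, start tracing right
 * away.  Runs at device_initcall() time.
 */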
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);