stacktrace.c

/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
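
/*
 * Record a single return address in the trace buffer.  Entries are dropped
 * while trace->skip is still positive, and scheduler addresses are dropped
 * when 'nosched' is set.  Returns -1 once the buffer is full so callers can
 * stop unwinding.
 */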
static int save_stack_address(struct stack_trace *trace, unsigned long addr,
			      bool nosched)
{
	if (nosched && in_sched_functions(addr))
		return 0;

	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	trace->entries[trace->nr_entries++] = addr;
	return 0;
}
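
/*
 * Core unwinder loop: walk the stack of 'task' (or of the interrupted
 * context described by 'regs') and feed each return address to
 * save_stack_address().  The trace is terminated with ULONG_MAX when the
 * buffer still has room for it.
 */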
static void noinline __save_stack_trace(struct stack_trace *trace,
			       struct task_struct *task, struct pt_regs *regs,
			       bool nosched)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs)
		save_stack_address(trace, regs->ip, nosched);

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || save_stack_address(trace, addr, nosched))
			break;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	trace->skip++;
	__save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
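
/*
 * Save a stack trace of the current task, starting the unwind from a
 * caller-supplied register state instead of the current frame.
 */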
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	__save_stack_trace(trace, current, regs, false);
}
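
/*
 * Save a stack trace of another task.  The task's stack is pinned with
 * try_get_task_stack() for the duration of the unwind, and scheduler
 * functions are filtered out of the result.
 */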
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current)
		trace->skip++;
	__save_stack_trace(trace, tsk, NULL, true);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
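
/*
 * Warn and dump the offending stack at most once per call site when the
 * reliable unwinder encounters a state it cannot handle.
 */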
#define STACKTRACE_DUMP_ONCE(task) ({				\
	static bool __section(.data.unlikely) __dumped;		\
								\
	if (!__dumped) {					\
		__dumped = true;				\
		WARN_ON(1);					\
		show_stack(task, NULL);				\
	}							\
})
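
/*
 * Walk the stack of an inactive task and bail out with -EINVAL on anything
 * that would make the resulting trace unreliable: entry registers other than
 * the final user mode syscall frame, an unknown return address, a full trace
 * buffer, or an unwinder error.
 */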
static int __always_inline
__save_stack_trace_reliable(struct stack_trace *trace,
			    struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (!user_mode(regs))
				return -EINVAL;

			/*
			 * The last frame contains the user mode syscall
			 * pt_regs.  Skip it and finish the unwind.
			 */
			unwind_next_frame(&state);
			if (!unwind_done(&state)) {
				STACKTRACE_DUMP_ONCE(task);
				return -EINVAL;
			}

			break;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr) {
			STACKTRACE_DUMP_ONCE(task);
			return -EINVAL;
		}

		if (save_stack_address(trace, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state)) {
		STACKTRACE_DUMP_ONCE(task);
		return -EINVAL;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;

	return 0;
}

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = __save_stack_trace_reliable(trace, tsk);

	put_task_stack(tsk);

	return ret;
}
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};
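
/*
 * Copy one stack_frame_user from user space with page faults disabled, so
 * this is safe to call from non-sleepable context.  Returns 1 on success
 * and 0 if the frame could not be read.
 */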
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}
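
/*
 * Walk the current task's user stack by following saved frame pointers,
 * starting at regs->bp.  The walk stops on a copy failure, a frame pointer
 * below the current stack pointer, or a self-referencing frame.
 */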
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
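
/*
 * Illustrative only, not part of the original file: a minimal sketch of how
 * a caller elsewhere in the kernel might consume the save_stack_trace() API
 * defined above.  The function name and buffer size are hypothetical.
 */
#if 0
static void example_print_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 0,
	};
	unsigned int i;

	save_stack_trace(&trace);

	/* The trace may be terminated with ULONG_MAX; stop there. */
	for (i = 0; i < trace.nr_entries; i++) {
		if (trace.entries[i] == ULONG_MAX)
			break;
		pr_info("%pS\n", (void *)trace.entries[i]);
	}
}
#endif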