dumpstack.c

/*
 * Stack dumping functions
 *
 * Copyright IBM Corp. 1999, 2013
 */
#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>

/*
 * For dump_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
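
/*
 * __dump_trace walks one stack area [low, high]: it follows the frame
 * back chain and reports the return address saved in each frame
 * (sf->gprs[8], i.e. r14) to func. When a zero back chain is found it
 * checks for a pt_regs interrupt frame, reports the PSW address for
 * kernel-mode frames and continues with the stack pointer saved in
 * gprs[15]. It returns the stack pointer at which it left the
 * [low, high] range.
 */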
static unsigned long
__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
             unsigned long low, unsigned long high)
{
        struct stack_frame *sf;
        struct pt_regs *regs;

        while (1) {
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
                /* Follow the backchain. */
                while (1) {
                        if (func(data, sf->gprs[8]))
                                return sp;
                        low = sp;
                        sp = sf->back_chain;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
                }
                /* Zero backchain detected, check for interrupt frame. */
                sp = (unsigned long) (sf + 1);
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
                if (!user_mode(regs)) {
                        if (func(data, regs->psw.addr))
                                return sp;
                }
                low = sp;
                sp = regs->gprs[15];
        }
}
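
/*
 * dump_trace calls func for each return address it finds, first on the
 * panic stack (only with CONFIG_CHECK_STACK), then on the asynchronous
 * interrupt stack and finally on the task's kernel stack. The callback
 * stops the walk by returning non-zero. Typical use mirrors show_trace()
 * below, e.g. dump_trace(show_address, NULL, NULL, current_stack_pointer()).
 */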
void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
                unsigned long sp)
{
        unsigned long frame_size;

        frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
        sp = __dump_trace(func, data, sp,
                          S390_lowcore.panic_stack + frame_size - 4096,
                          S390_lowcore.panic_stack + frame_size);
#endif
        sp = __dump_trace(func, data, sp,
                          S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
                          S390_lowcore.async_stack + frame_size);
        task = task ?: current;
        __dump_trace(func, data, sp,
                     (unsigned long)task_stack_page(task),
                     (unsigned long)task_stack_page(task) + THREAD_SIZE);
}
EXPORT_SYMBOL_GPL(dump_trace);
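
/*
 * return_address(depth) reports the return address 'depth' callers up
 * the stack. __return_address is the dump_trace callback: it skips
 * rd->depth entries and records the next one; the ".depth = depth + 2"
 * skips two extra entries so that the frames of return_address() and
 * the trace walk itself do not count towards the requested depth.
 */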
struct return_address_data {
        unsigned long address;
        int depth;
};

static int __return_address(void *data, unsigned long address)
{
        struct return_address_data *rd = data;

        if (rd->depth--)
                return 0;
        rd->address = address;
        return 1;
}

unsigned long return_address(int depth)
{
        struct return_address_data rd = { .depth = depth + 2 };

        dump_trace(__return_address, &rd, NULL, current_stack_pointer());
        return rd.address;
}
EXPORT_SYMBOL_GPL(return_address);
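
/*
 * show_address is the dump_trace callback used for printing: it emits
 * one "([<address>] symbol)" line per entry and never aborts the walk.
 * show_trace prints the call trace of the given task, or of the current
 * task if none is given, followed by the locks held by that task (a
 * no-op unless lockdep is enabled).
 */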
static int show_address(void *data, unsigned long address)
{
        printk("([<%016lx>] %pSR)\n", address, (void *)address);
        return 0;
}

static void show_trace(struct task_struct *task, unsigned long sp)
{
        if (!sp)
                sp = task ? task->thread.ksp : current_stack_pointer();
        printk("Call Trace:\n");
        dump_trace(show_address, NULL, task, sp);
        if (!task)
                task = current;
        debug_show_held_locks(task);
}
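
/*
 * show_stack dumps up to 20 longwords of raw stack memory, stopping at
 * a THREAD_SIZE boundary, and then prints the call trace starting from
 * the same stack pointer.
 */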
void show_stack(struct task_struct *task, unsigned long *sp)
{
        unsigned long *stack;
        int i;

        stack = sp;
        if (!stack) {
                if (!task)
                        stack = (unsigned long *)current_stack_pointer();
                else
                        stack = (unsigned long *)task->thread.ksp;
        }
        for (i = 0; i < 20; i++) {
                if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
                        break;
                if ((i * sizeof(long) % 32) == 0)
                        printk("%s ", i == 0 ? "" : "\n");
                printk("%016lx ", *stack++);
        }
        printk("\n");
        show_trace(task, (unsigned long)sp);
}
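
/*
 * regs->args[0] holds the last breaking-event address saved by the
 * low-level entry code; it points to the instruction that last caused
 * a break in sequential execution and helps to locate the origin of a
 * wild branch.
 */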
static void show_last_breaking_event(struct pt_regs *regs)
{
        printk("Last Breaking-Event-Address:\n");
        printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
}
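
/*
 * show_registers prints the PSW with its decoded bit fields, the 16
 * general purpose registers and, via show_code(), a dump of the code
 * around the PSW address.
 */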
void show_registers(struct pt_regs *regs)
{
        struct psw_bits *psw = &psw_bits(regs->psw);
        char *mode;

        mode = user_mode(regs) ? "User" : "Krnl";
        printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
        if (!user_mode(regs))
                printk(" (%pSR)", (void *)regs->psw.addr);
        printk("\n");
        printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
               "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
               psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
        printk(" RI:%x EA:%x", psw->ri, psw->eaba);
        printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
               regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
        printk(" %016lx %016lx %016lx %016lx\n",
               regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
        printk(" %016lx %016lx %016lx %016lx\n",
               regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
        printk(" %016lx %016lx %016lx %016lx\n",
               regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
        show_code(regs);
}
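
/*
 * show_regs combines the generic register banner, the register dump, a
 * call trace (for kernel-mode pt_regs only) and the last breaking-event
 * address.
 */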
void show_regs(struct pt_regs *regs)
{
        show_regs_print_info(KERN_DEFAULT);
        show_registers(regs);
        /* Show stack backtrace if pt_regs is from kernel mode */
        if (!user_mode(regs))
                show_trace(NULL, regs->gprs[15]);
        show_last_breaking_event(regs);
}
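
/*
 * die prints the oops banner, registers and backtrace under die_lock,
 * taints the kernel and terminates the current task; it panics instead
 * if the oops happened in interrupt context or panic_on_oops is set.
 */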
static DEFINE_SPINLOCK(die_lock);

void die(struct pt_regs *regs, const char *str)
{
        static int die_counter;

        oops_enter();
        lgr_info_log();
        debug_stop_all();
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
               regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
        if (debug_pagealloc_enabled())
                printk("DEBUG_PAGEALLOC");
        printk("\n");
        notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
        print_modules();
        show_regs(regs);
        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        spin_unlock_irq(&die_lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception: panic_on_oops");
        oops_exit();
        do_exit(SIGSEGV);
}