dumpstack.c

/*
 * Stack dumping functions
 *
 * Copyright IBM Corp. 1999, 2013
 */
#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>

/*
 * For dump_trace we have three different stacks to consider:
 *   - the panic stack, which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
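/*
 * Illustrative note, not part of the original file: the walk below relies
 * on the s390 ABI stack frame layout, roughly as defined for 64-bit kernels
 * in arch/s390/include/asm/processor.h:
 *
 *        struct stack_frame {
 *                unsigned long back_chain;
 *                unsigned long empty1[5];
 *                unsigned long gprs[10];
 *                unsigned int  empty2[8];
 *        };
 *
 * back_chain (offset 0) links a frame to its caller's frame, and gprs[]
 * holds the saved registers r6-r15, so sf->gprs[8] is the saved return
 * address (r14) that __dump_trace() passes to the callback.
 */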
static unsigned long
__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
             unsigned long low, unsigned long high)
{
        struct stack_frame *sf;
        struct pt_regs *regs;

        while (1) {
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
                if (func(data, sf->gprs[8], 0))
                        return sp;
                /* Follow the backchain. */
                while (1) {
                        low = sp;
                        sp = sf->back_chain;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
                        if (func(data, sf->gprs[8], 1))
                                return sp;
                }
                /* Zero backchain detected, check for interrupt frame. */
                sp = (unsigned long) (sf + 1);
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
                if (!user_mode(regs)) {
                        if (func(data, regs->psw.addr, 1))
                                return sp;
                }
                low = sp;
                sp = regs->gprs[15];
        }
}

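/*
 * Note added for clarity: __dump_trace() returns the stack pointer at which
 * the walk left the given [low, high] window, so dump_trace() below can feed
 * that value straight into the walk of the next stack.
 */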
void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
                unsigned long sp)
{
        unsigned long frame_size;

        frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
        sp = __dump_trace(func, data, sp,
                          S390_lowcore.panic_stack + frame_size - 4096,
                          S390_lowcore.panic_stack + frame_size);
#endif
        sp = __dump_trace(func, data, sp,
                          S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
                          S390_lowcore.async_stack + frame_size);
        task = task ?: current;
        __dump_trace(func, data, sp,
                     (unsigned long)task_stack_page(task),
                     (unsigned long)task_stack_page(task) + THREAD_SIZE);
}
EXPORT_SYMBOL_GPL(dump_trace);

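/*
 * Illustrative sketch, not part of the original file: dump_trace() is
 * exported, so code with its own dump_trace_func_t callback can walk the
 * stacks itself. The callback receives (data, address, reliable) for each
 * frame and stops the walk by returning non-zero, just as show_address()
 * below does when printing. All names in this disabled example are made up.
 */
#if 0
struct addr_collector {
        unsigned long addrs[16];
        unsigned int count;
};

static int collect_address(void *data, unsigned long address, int reliable)
{
        struct addr_collector *c = data;

        if (c->count >= ARRAY_SIZE(c->addrs))
                return 1;       /* buffer full, stop the walk */
        c->addrs[c->count++] = address;
        return 0;               /* keep walking */
}

static void collect_current_trace(void)
{
        struct addr_collector c = { .count = 0 };

        dump_trace(collect_address, &c, NULL, current_stack_pointer());
}
#endif
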
static int show_address(void *data, unsigned long address, int reliable)
{
        if (reliable)
                printk(" [<%016lx>] %pSR \n", address, (void *)address);
        else
                printk("([<%016lx>] %pSR)\n", address, (void *)address);
        return 0;
}

static void show_trace(struct task_struct *task, unsigned long sp)
{
        if (!sp)
                sp = task ? task->thread.ksp : current_stack_pointer();
        printk("Call Trace:\n");
        dump_trace(show_address, NULL, task, sp);
        if (!task)
                task = current;
        debug_show_held_locks(task);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        unsigned long *stack;
        int i;

        stack = sp;
        if (!stack) {
                if (!task)
                        stack = (unsigned long *)current_stack_pointer();
                else
                        stack = (unsigned long *)task->thread.ksp;
        }
        printk(KERN_DEFAULT "Stack:\n");
        for (i = 0; i < 20; i++) {
                if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
                        break;
                if (i % 4 == 0)
                        printk(KERN_DEFAULT "       ");
                pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' ');
        }
        show_trace(task, (unsigned long)sp);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
        printk("Last Breaking-Event-Address:\n");
        printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
}

void show_registers(struct pt_regs *regs)
{
        struct psw_bits *psw = &psw_bits(regs->psw);
        char *mode;

        mode = user_mode(regs) ? "User" : "Krnl";
        printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
        if (!user_mode(regs))
                pr_cont(" (%pSR)", (void *)regs->psw.addr);
        pr_cont("\n");
        printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
               "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
               psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
        pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
        printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
               regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
        printk("           %016lx %016lx %016lx %016lx\n",
               regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
        printk("           %016lx %016lx %016lx %016lx\n",
               regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
        printk("           %016lx %016lx %016lx %016lx\n",
               regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
        show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
        show_regs_print_info(KERN_DEFAULT);
        show_registers(regs);
        /* Show stack backtrace if pt_regs is from kernel mode */
        if (!user_mode(regs))
                show_trace(NULL, regs->gprs[15]);
        show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

void die(struct pt_regs *regs, const char *str)
{
        static int die_counter;

        oops_enter();
        lgr_info_log();
        debug_stop_all();
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
               regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
        pr_cont("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        pr_cont("SMP ");
#endif
        if (debug_pagealloc_enabled())
                pr_cont("DEBUG_PAGEALLOC");
        pr_cont("\n");
        notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
        print_modules();
        show_regs(regs);
        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        spin_unlock_irq(&die_lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception: panic_on_oops");
        oops_exit();
        do_exit(SIGSEGV);
}