/*
 * Stack trace management functions
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
/*
 * Walk one kernel stack delimited by [low, high], following the frame
 * back-chain and storing return addresses into @trace.
 *
 * @trace:   destination; honors trace->skip and trace->max_entries
 * @sp:      stack pointer to start the walk from
 * @low:     lowest valid address of this stack area
 * @high:    highest valid address of this stack area
 * @nosched: when non-zero, suppress addresses inside scheduler functions
 *           (per in_sched_functions())
 *
 * Returns the stack pointer at which the walk stopped; the caller may
 * feed it into another invocation to continue on the next stack area.
 */
static unsigned long save_context_stack(struct stack_trace *trace,
					unsigned long sp,
					unsigned long low,
					unsigned long high,
					int nosched)
{
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long addr;

	while (1) {
		/* Stop as soon as sp leaves the current stack area. */
		if (sp < low || sp > high)
			return sp;
		sf = (struct stack_frame *)sp;
		while (1) {
			/*
			 * NOTE(review): gprs[8] is presumably the saved
			 * return register (%r14) in the s390 frame layout —
			 * confirm against struct stack_frame.
			 */
			addr = sf->gprs[8];
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
			if (trace->nr_entries >= trace->max_entries)
				return sp;
			/* Back-chain must point strictly upward and leave
			 * room for a full frame below high. */
			low = sp;
			sp = sf->back_chain;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *)sp;
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long)(sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *)sp;
		/* The interrupted instruction address comes from the PSW. */
		addr = regs->psw.addr;
		if (!nosched || !in_sched_functions(addr)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
		}
		if (trace->nr_entries >= trace->max_entries)
			return sp;
		low = sp;
		/* gprs[15] holds the interrupted context's stack pointer;
		 * continue the walk there. */
		sp = regs->gprs[15];
	}
}
  58. static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
  59. {
  60. unsigned long new_sp, frame_size;
  61. frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
  62. new_sp = save_context_stack(trace, sp,
  63. S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
  64. S390_lowcore.panic_stack + frame_size, 0);
  65. new_sp = save_context_stack(trace, new_sp,
  66. S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
  67. S390_lowcore.async_stack + frame_size, 0);
  68. save_context_stack(trace, new_sp,
  69. S390_lowcore.thread_info,
  70. S390_lowcore.thread_info + THREAD_SIZE, 0);
  71. }
  72. void save_stack_trace(struct stack_trace *trace)
  73. {
  74. register unsigned long r15 asm ("15");
  75. unsigned long sp;
  76. sp = r15;
  77. __save_stack_trace(trace, sp);
  78. if (trace->nr_entries < trace->max_entries)
  79. trace->entries[trace->nr_entries++] = ULONG_MAX;
  80. }
  81. EXPORT_SYMBOL_GPL(save_stack_trace);
/*
 * Capture a stack trace for task @tsk into @trace, walking only the
 * task's kernel stack and suppressing scheduler-internal addresses
 * (nosched=1). Terminates the entry list with ULONG_MAX if room is left.
 *
 * NOTE(review): for tsk != current this reads tsk->thread.ksp without
 * ensuring the task is stopped — presumably callers guarantee that;
 * confirm against users of this API.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp, low, high;

	/* Saved kernel stack pointer of a sleeping task ... */
	sp = tsk->thread.ksp;
	if (tsk == current) {
		/* Get current stack pointer. */
		asm volatile("la %0,0(15)" : "=a" (sp));
	}
	/* Bound the walk to the task's stack page, up to its pt_regs. */
	low = (unsigned long) task_stack_page(tsk);
	high = (unsigned long) task_pt_regs(tsk);
	save_context_stack(trace, sp, low, high, 1);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
  97. void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
  98. {
  99. unsigned long sp;
  100. sp = kernel_stack_pointer(regs);
  101. __save_stack_trace(trace, sp);
  102. if (trace->nr_entries < trace->max_entries)
  103. trace->entries[trace->nr_entries++] = ULONG_MAX;
  104. }
  105. EXPORT_SYMBOL_GPL(save_stack_trace_regs);