stacktrace.h

/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

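/*
 * One frame record in an unwind: the frame pointer and the return address,
 * advanced in place by unwind_frame().
 */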
struct stackframe {
	unsigned long fp;
	unsigned long pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph;
#endif
};

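/*
 * Core unwinder API: unwind_frame() advances frame one caller up the call
 * chain; walk_stackframe() applies fn to each frame in turn, stopping when
 * fn returns non-zero or no further frames can be unwound.
 */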
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    int (*fn)(struct stackframe *, void *), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);

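/*
 * Per-CPU base of the IRQ stack; on_irq_stack() treats a NULL base as
 * "IRQ stack not yet allocated".
 */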
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

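/*
 * Check whether sp falls within this CPU's IRQ stack. Like the other
 * per-cpu checks below, this is only reliable for current in a
 * non-preemptible context (see on_accessible_stack()).
 */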
static inline bool on_irq_stack(unsigned long sp)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	if (!low)
		return false;

	return (low <= sp && sp < high);
}

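/* Check whether sp falls within tsk's task stack. */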
static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	return (low <= sp && sp < high);
}

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

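/*
 * Check whether sp falls within this CPU's overflow stack, the fallback
 * stack used when a vmap'd kernel stack overflows.
 */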
static inline bool on_overflow_stack(unsigned long sp)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return (low <= sp && sp < high);
}
#else
static inline bool on_overflow_stack(unsigned long sp) { return false; }
#endif

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context: otherwise the task could migrate to another CPU between reading a
 * per-cpu stack base and comparing sp against it.
 */
static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp)
{
	if (on_task_stack(tsk, sp))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp))
		return true;
	if (on_overflow_stack(sp))
		return true;
	if (on_sdei_stack(sp))
		return true;

	return false;
}

#endif	/* __ASM_STACKTRACE_H */