/* task_stack.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _LINUX_SCHED_TASK_STACK_H
  3. #define _LINUX_SCHED_TASK_STACK_H
  4. /*
  5. * task->stack (kernel stack) handling interfaces:
  6. */
  7. #include <linux/sched.h>
  8. #include <linux/magic.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK

/*
 * When accessing the stack of a non-current task that might exit, use
 * try_get_task_stack() instead. task_stack_page will return a pointer
 * that could get freed out from under you.
 */
static inline void *task_stack_page(const struct task_struct *task)
{
	return task->stack;
}

/* thread_info is embedded in task_struct here, so nothing to copy. */
#define setup_thread_stack(new,old)	do { } while(0)

/*
 * With thread_info in task_struct the stack allocation carries no
 * metadata, so the stack base itself is the usable end.
 */
static inline unsigned long *end_of_stack(const struct task_struct *task)
{
	return task->stack;
}

#elif !defined(__HAVE_THREAD_FUNCTIONS)

#define task_stack_page(task)	((void *)(task)->stack)

/* Copy the parent's thread_info, then point it back at the new task. */
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
 * Pin @tsk's stack so it cannot be freed while the caller inspects it.
 * Returns the stack page on success, or NULL if the stack's refcount
 * has already dropped to zero (stack already released).
 */
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return atomic_inc_not_zero(&tsk->stack_refcount) ?
		task_stack_page(tsk) : NULL;
}

/* Drop a reference taken by try_get_task_stack(). */
extern void put_task_stack(struct task_struct *tsk);
#else
/*
 * Without CONFIG_THREAD_INFO_IN_TASK the stack lives as long as the
 * task itself, so no reference counting is needed.
 */
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return task_stack_page(tsk);
}

static inline void put_task_stack(struct task_struct *tsk) {}
#endif
/*
 * True when the STACK_END_MAGIC canary at end_of_stack() has been
 * overwritten, i.e. the stack overflowed into it.
 */
#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)
  65. static inline int object_is_on_stack(const void *obj)
  66. {
  67. void *stack = task_stack_page(current);
  68. return (obj >= stack) && (obj < (stack + THREAD_SIZE));
  69. }
/* One-time setup of the cache backing thread-stack allocations. */
extern void thread_stack_cache_init(void);
#ifdef CONFIG_DEBUG_STACK_USAGE
/*
 * Walk inward from end_of_stack(), skipping still-zero "canary" words,
 * and return how many bytes of the stack were never written.  Walk
 * direction depends on which way the stack grows.
 */
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do { 	/* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);

# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
#endif
/* Plant STACK_END_MAGIC at end_of_stack() so overflow can be detected. */
extern void set_task_stack_end_magic(struct task_struct *tsk);
  90. #ifndef __HAVE_ARCH_KSTACK_END
  91. static inline int kstack_end(void *addr)
  92. {
  93. /* Reliable end of stack detection:
  94. * Some APM bios versions misalign the stack
  95. */
  96. return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
  97. }
  98. #endif
  99. #endif /* _LINUX_SCHED_TASK_STACK_H */