thread_info.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#ifndef _ASM_IA64_THREAD_INFO_H
#define _ASM_IA64_THREAD_INFO_H

#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
#include <asm/processor.h>
#include <asm/ptrace.h>

#define THREAD_SIZE	KERNEL_STACK_SIZE

#ifndef __ASSEMBLY__

/*
 * On IA-64, we want to keep the task structure and kernel stack together, so they can be
 * mapped by a single TLB entry and so they can be addressed by the "current" pointer
 * without having to do pointer masking.
 */
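/*
 * Illustrative sketch (not part of the original header): the single
 * KERNEL_STACK_SIZE allocation is presumably laid out roughly as
 *
 *	+-------------------------------+  (task) + KERNEL_STACK_SIZE
 *	|  memory stack (grows down)    |
 *	|             ...               |
 *	|  register backing store (RBS) |
 *	|  (grows up)                   |
 *	+-------------------------------+  (task) + IA64_RBS_OFFSET
 *	|  struct thread_info           |
 *	+-------------------------------+  (task) + IA64_TASK_SIZE
 *	|  struct task_struct           |
 *	+-------------------------------+  (task)
 *
 * which is why thread_info, the stack and "current" can all be reached from
 * one another with plain offset arithmetic.
 */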
struct thread_info {
	struct task_struct *task;	/* XXX not really needed, except for dup_task_struct() */
	__u32 flags;			/* thread_info flags (see TIF_*) */
	__u32 cpu;			/* current CPU */
	__u32 last_cpu;			/* Last CPU thread ran on */
	__u32 status;			/* Thread synchronous flags */
	mm_segment_t addr_limit;	/* user-level address space limit */
	int preempt_count;		/* 0=preemptible, <0=BUG; will also serve as bh-counter */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	__u64 utime;
	__u64 stime;
	__u64 gtime;
	__u64 hardirq_time;
	__u64 softirq_time;
	__u64 idle_time;
	__u64 ac_stamp;
	__u64 ac_leave;
	__u64 ac_stime;
	__u64 ac_utime;
#endif
};
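
/*
 * Note (assumption, not from this header): with CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 * the ac_* fields appear to serve as per-thread timestamps/accumulators updated
 * on kernel entry/exit, while utime/stime/gtime and the hardirq/softirq/idle
 * times hold the accumulated per-context totals.
 */
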
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.flags		= 0,			\
	.cpu		= 0,			\
	.addr_limit	= KERNEL_DS,		\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
}

#ifndef ASM_OFFSETS_C
/* how to get the thread information struct from C */
#define current_thread_info()	((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
#define alloc_thread_stack_node(tsk, node)	\
		((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
#define task_thread_info(tsk)	((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#else
#define current_thread_info()	((struct thread_info *) 0)
#define alloc_thread_stack_node(tsk, node)	((unsigned long *) 0)
#define task_thread_info(tsk)	((struct thread_info *) 0)
#endif
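
/*
 * Illustrative use only (not part of the original header): since thread_info
 * sits at a fixed offset from the task_struct, both lookups reduce to pointer
 * arithmetic, e.g.
 *
 *	struct thread_info *ti = current_thread_info();
 *	if (ti->flags & _TIF_NEED_RESCHED)
 *		schedule();
 *
 * task_thread_info(tsk) does the same for an arbitrary task pointer.
 */
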
#define free_thread_stack(tsk)	/* nothing */
#define task_stack_page(tsk)	((void *)(tsk))

#define __HAVE_THREAD_FUNCTIONS
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define setup_thread_stack(p, org)			\
	*task_thread_info(p) = *task_thread_info(org);	\
	task_thread_info(p)->ac_stime = 0;		\
	task_thread_info(p)->ac_utime = 0;		\
	task_thread_info(p)->task = (p);
#else
#define setup_thread_stack(p, org)			\
	*task_thread_info(p) = *task_thread_info(org);	\
	task_thread_info(p)->task = (p);
#endif
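
/*
 * Illustrative note (assumption, not from this header): the generic fork path
 * uses setup_thread_stack() to seed the child's thread_info from the parent's,
 * roughly
 *
 *	setup_thread_stack(child, parent);
 *
 * i.e. copy the parent's thread_info, zero the child's accounting counters
 * (when native vcpu accounting is enabled) and point ->task back at the child.
 */
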
#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)

#define alloc_task_struct_node(node)						\
({										\
	struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP,	\
					     KERNEL_STACK_SIZE_ORDER);		\
	struct task_struct *ret = page ? page_address(page) : NULL;		\
										\
	ret;									\
})
#define free_task_struct(tsk)	free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
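
/*
 * Illustrative note (assumption, not from this header): task_struct, thread_info
 * and the kernel stack all live in the one KERNEL_STACK_SIZE_ORDER block of
 * pages allocated above, so freeing the task struct frees the stack as well.
 */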

#endif /* !__ASSEMBLY__ */

/*
 * thread information flags
 * - these are process state flags that various assembly files may need to access
 * - pending work-to-be-done flags are in least-significant 16 bits, other flags
 *   in top 16 bits
 */
#define TIF_SIGPENDING		0	/* signal pending */
#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
#define TIF_SYSCALL_TRACE	2	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	3	/* syscall auditing active */
#define TIF_SINGLESTEP		4	/* restore singlestep on return to user mode */
#define TIF_NOTIFY_RESUME	6	/* resumption notification requested */
#define TIF_MEMDIE		17	/* is terminating due to OOM killer */
#define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
#define TIF_DB_DISABLED		19	/* debug trap disabled for fsyscall */
#define TIF_RESTORE_RSE		21	/* user RBS is newer than kernel RBS */
#define TIF_POLLING_NRFLAG	22	/* idle is polling for TIF_NEED_RESCHED */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_SYSCALL_TRACEAUDIT	(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
#define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED)
#define _TIF_RESTORE_RSE	(1 << TIF_RESTORE_RSE)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)

/* "work to do on user-return" bits */
#define TIF_ALLWORK_MASK	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\
				 _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE)
/* like TIF_ALLWORK_MASK but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
#define TIF_WORK_MASK		(TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
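
/*
 * Illustrative use only (not part of the original header): the exit-to-user
 * path tests these masks against thread_info->flags, conceptually
 *
 *	if (unlikely(current_thread_info()->flags & TIF_ALLWORK_MASK))
 *		do_notify_resume_user(...);	<- handle signals, reschedule, etc.
 *
 * TIF_WORK_MASK is the same test minus the syscall tracing/audit bits.
 */
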
#endif /* _ASM_IA64_THREAD_INFO_H */