ptrace.h 5.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Copyright (C) 1998-2004 Hewlett-Packard Co
  4. * David Mosberger-Tang <davidm@hpl.hp.com>
  5. * Stephane Eranian <eranian@hpl.hp.com>
  6. * Copyright (C) 2003 Intel Co
  7. * Suresh Siddha <suresh.b.siddha@intel.com>
  8. * Fenghua Yu <fenghua.yu@intel.com>
  9. * Arun Sharma <arun.sharma@intel.com>
  10. *
  11. * 12/07/98 S. Eranian added pt_regs & switch_stack
  12. * 12/21/98 D. Mosberger updated to match latest code
  13. * 6/17/99 D. Mosberger added second unat member to "struct switch_stack"
  14. *
  15. */
  16. #ifndef _ASM_IA64_PTRACE_H
  17. #define _ASM_IA64_PTRACE_H
  18. #ifndef ASM_OFFSETS_C
  19. #include <asm/asm-offsets.h>
  20. #endif
  21. #include <uapi/asm/ptrace.h>
  22. /*
  23. * Base-2 logarithm of number of pages to allocate per task structure
  24. * (including register backing store and memory stack):
  25. */
  26. #if defined(CONFIG_IA64_PAGE_SIZE_4KB)
  27. # define KERNEL_STACK_SIZE_ORDER 3
  28. #elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
  29. # define KERNEL_STACK_SIZE_ORDER 2
  30. #elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
  31. # define KERNEL_STACK_SIZE_ORDER 1
  32. #else
  33. # define KERNEL_STACK_SIZE_ORDER 0
  34. #endif
  35. #define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 31) & ~31)
  36. #define IA64_STK_OFFSET ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
  37. #define KERNEL_STACK_SIZE IA64_STK_OFFSET
  38. #ifndef __ASSEMBLY__
  39. #include <asm/current.h>
  40. #include <asm/page.h>
  41. /*
  42. * We use the ia64_psr(regs)->ri to determine which of the three
  43. * instructions in bundle (16 bytes) took the sample. Generate
  44. * the canonical representation by adding to instruction pointer.
  45. */
  46. # define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
  47. static inline unsigned long user_stack_pointer(struct pt_regs *regs)
  48. {
  49. /* FIXME: should this be bspstore + nr_dirty regs? */
  50. return regs->ar_bspstore;
  51. }
  52. static inline int is_syscall_success(struct pt_regs *regs)
  53. {
  54. return regs->r10 != -1;
  55. }
  56. static inline long regs_return_value(struct pt_regs *regs)
  57. {
  58. if (is_syscall_success(regs))
  59. return regs->r8;
  60. else
  61. return -regs->r8;
  62. }
  63. /* Conserve space in histogram by encoding slot bits in address
  64. * bits 2 and 3 rather than bits 0 and 1.
  65. */
  66. #define profile_pc(regs) \
  67. ({ \
  68. unsigned long __ip = instruction_pointer(regs); \
  69. (__ip & ~3UL) + ((__ip & 3UL) << 2); \
  70. })
  71. /*
  72. * Why not default? Because user_stack_pointer() on ia64 gives register
  73. * stack backing store instead...
  74. */
  75. #define current_user_stack_pointer() (current_pt_regs()->r12)
  76. /* given a pointer to a task_struct, return the user's pt_regs */
  77. # define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
  78. # define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
  79. # define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
  80. # define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
  81. # define fsys_mode(task,regs) \
  82. ({ \
  83. struct task_struct *_task = (task); \
  84. struct pt_regs *_regs = (regs); \
  85. !user_mode(_regs) && user_stack(_task, _regs); \
  86. })
  87. /*
  88. * System call handlers that, upon successful completion, need to return a negative value
  89. * should call force_successful_syscall_return() right before returning. On architectures
  90. * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
  91. * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
  92. * flag will not get set. On architectures which do not support a separate error flag,
  93. * the macro is a no-op and the spurious error condition needs to be filtered out by some
  94. * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
  95. * or something along those lines).
  96. *
  97. * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
  98. */
  99. # define force_successful_syscall_return() (task_pt_regs(current)->r8 = 0)
  100. struct task_struct; /* forward decl */
  101. struct unw_frame_info; /* forward decl */
  102. extern void ia64_do_show_stack (struct unw_frame_info *, void *);
  103. extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
  104. unsigned long *);
  105. extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
  106. unsigned long, long *);
  107. extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
  108. unsigned long, long);
  109. extern void ia64_flush_fph (struct task_struct *);
  110. extern void ia64_sync_fph (struct task_struct *);
  111. extern void ia64_sync_krbs(void);
  112. extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
  113. unsigned long, unsigned long);
  114. /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
  115. extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
  116. /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
  117. extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
  118. extern void ia64_increment_ip (struct pt_regs *pt);
  119. extern void ia64_decrement_ip (struct pt_regs *pt);
  120. extern void ia64_ptrace_stop(void);
  121. #define arch_ptrace_stop(code, info) \
  122. ia64_ptrace_stop()
  123. #define arch_ptrace_stop_needed(code, info) \
  124. (!test_thread_flag(TIF_RESTORE_RSE))
  125. extern void ptrace_attach_sync_user_rbs (struct task_struct *);
  126. #define arch_ptrace_attach(child) \
  127. ptrace_attach_sync_user_rbs(child)
  128. #define arch_has_single_step() (1)
  129. #define arch_has_block_step() (1)
  130. #endif /* !__ASSEMBLY__ */
  131. #endif /* _ASM_IA64_PTRACE_H */