/* step.c */
  1. /*
  2. * x86 single-step support code, common to 32-bit and 64-bit.
  3. */
  4. #include <linux/sched.h>
  5. #include <linux/sched/task_stack.h>
  6. #include <linux/mm.h>
  7. #include <linux/ptrace.h>
  8. #include <asm/desc.h>
  9. #include <asm/mmu_context.h>
/*
 * Convert the task's saved instruction pointer into a linear address,
 * taking the code segment into account.
 *
 * @child: traced task whose registers we are inspecting
 * @regs:  its saved user registers
 *
 * Returns the linear address of the next instruction, or -1L if the
 * selector references a bogus LDT entry (an access would fault anyway).
 */
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->ip;
	seg = regs->cs & 0xffff;
	if (v8086_mode(regs)) {
		/* vm86: linear address is segment * 16 + 16-bit offset */
		addr = (addr & 0xffff) + (seg << 4);
		return addr;
	}

#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based. That is largely true: the
	 * TLS segments are used for data, and the PNPBIOS
	 * and APM bios ones we just ignore here.
	 */
	if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		struct desc_struct *desc;
		unsigned long base;

		/* Drop the RPL and TI bits to get the LDT index. */
		seg >>= 3;

		/* The LDT can be modified/freed concurrently; lock it. */
		mutex_lock(&child->mm->context.lock);
		if (unlikely(!child->mm->context.ldt ||
			     seg >= child->mm->context.ldt->size))
			addr = -1L; /* bogus selector, access would fault */
		else {
			desc = &child->mm->context.ldt->entries[seg];
			base = get_desc_base(desc);

			/* 16-bit code segment? Then IP wraps at 64K. */
			if (!desc->d)
				addr &= 0xffff;
			addr += base;
		}
		mutex_unlock(&child->mm->context.lock);
	}
#endif

	return addr;
}
  47. static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
  48. {
  49. int i, copied;
  50. unsigned char opcode[15];
  51. unsigned long addr = convert_ip_to_linear(child, regs);
  52. copied = access_process_vm(child, addr, opcode, sizeof(opcode),
  53. FOLL_FORCE);
  54. for (i = 0; i < copied; i++) {
  55. switch (opcode[i]) {
  56. /* popf and iret */
  57. case 0x9d: case 0xcf:
  58. return 1;
  59. /* CHECKME: 64 65 */
  60. /* opcode and address size prefixes */
  61. case 0x66: case 0x67:
  62. continue;
  63. /* irrelevant prefixes (segment overrides and repeats) */
  64. case 0x26: case 0x2e:
  65. case 0x36: case 0x3e:
  66. case 0x64: case 0x65:
  67. case 0xf0: case 0xf2: case 0xf3:
  68. continue;
  69. #ifdef CONFIG_X86_64
  70. case 0x40 ... 0x4f:
  71. if (!user_64bit_mode(regs))
  72. /* 32-bit mode: register increment */
  73. return 0;
  74. /* 64-bit mode: REX prefix */
  75. continue;
  76. #endif
  77. /* CHECKME: f2, f3 */
  78. /*
  79. * pushf: NOTE! We should probably not let
  80. * the user see the TF bit being set. But
  81. * it's more pain than it's worth to avoid
  82. * it, and a debugger could emulate this
  83. * all in user space if it _really_ cares.
  84. */
  85. case 0x9c:
  86. default:
  87. return 0;
  88. }
  89. }
  90. return 0;
  91. }
/*
 * Enable single-stepping. Return nonzero if user mode is not using TF itself.
 *
 * The return value tells the caller whether block stepping (BTF) is safe
 * to layer on top: 1 means TF is "ours", 0 means user space owns TF.
 */
static int enable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = task_pt_regs(child);
	unsigned long oflags;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state so we don't wrongly set TIF_FORCED_TF below.
	 * If enable_single_step() was used last and that is what
	 * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are
	 * already set and our bookkeeping is fine.
	 */
	if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP)))
		regs->flags |= X86_EFLAGS_TF;

	/*
	 * Always set TIF_SINGLESTEP - this guarantees that
	 * we single-step system calls etc.. This will also
	 * cause us to set TF when returning to user mode.
	 */
	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	/* Snapshot flags BEFORE forcing TF, so we can tell who set it. */
	oflags = regs->flags;

	/* Set TF on the kernel stack.. */
	regs->flags |= X86_EFLAGS_TF;

	/*
	 * ..but if TF is changed by the instruction we will trace,
	 * don't mark it as being "us" that set it, so that we
	 * won't clear it by hand later.
	 *
	 * Note that if we don't actually execute the popf because
	 * of a signal arriving right now or suchlike, we will lose
	 * track of the fact that it really was "us" that set it.
	 */
	if (is_setting_trap_flag(child, regs)) {
		clear_tsk_thread_flag(child, TIF_FORCED_TF);
		return 0;
	}

	/*
	 * If TF was already set, check whether it was us who set it.
	 * If not, we should never attempt a block step.
	 */
	if (oflags & X86_EFLAGS_TF)
		return test_tsk_thread_flag(child, TIF_FORCED_TF);

	/* TF was clear before: we set it, so record that fact. */
	set_tsk_thread_flag(child, TIF_FORCED_TF);

	return 1;
}
  142. void set_task_blockstep(struct task_struct *task, bool on)
  143. {
  144. unsigned long debugctl;
  145. /*
  146. * Ensure irq/preemption can't change debugctl in between.
  147. * Note also that both TIF_BLOCKSTEP and debugctl should
  148. * be changed atomically wrt preemption.
  149. *
  150. * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
  151. * task is current or it can't be running, otherwise we can race
  152. * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
  153. * PTRACE_KILL is not safe.
  154. */
  155. local_irq_disable();
  156. debugctl = get_debugctlmsr();
  157. if (on) {
  158. debugctl |= DEBUGCTLMSR_BTF;
  159. set_tsk_thread_flag(task, TIF_BLOCKSTEP);
  160. } else {
  161. debugctl &= ~DEBUGCTLMSR_BTF;
  162. clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
  163. }
  164. if (task == current)
  165. update_debugctlmsr(debugctl);
  166. local_irq_enable();
  167. }
  168. /*
  169. * Enable single or block step.
  170. */
  171. static void enable_step(struct task_struct *child, bool block)
  172. {
  173. /*
  174. * Make sure block stepping (BTF) is not enabled unless it should be.
  175. * Note that we don't try to worry about any is_setting_trap_flag()
  176. * instructions after the first when using block stepping.
  177. * So no one should try to use debugger block stepping in a program
  178. * that uses user-mode single stepping itself.
  179. */
  180. if (enable_single_step(child) && block)
  181. set_task_blockstep(child, true);
  182. else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
  183. set_task_blockstep(child, false);
  184. }
  185. void user_enable_single_step(struct task_struct *child)
  186. {
  187. enable_step(child, 0);
  188. }
  189. void user_enable_block_step(struct task_struct *child)
  190. {
  191. enable_step(child, 1);
  192. }
/*
 * ptrace entry point: stop single/block stepping @child.
 *
 * Clears BTF (if it was on), TIF_SINGLESTEP, and TF — but TF only if
 * it was this code that set it (TIF_FORCED_TF), so a user-mode TF is
 * left intact.
 */
void user_disable_single_step(struct task_struct *child)
{
	/*
	 * Make sure block stepping (BTF) is disabled.
	 */
	if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
		set_task_blockstep(child, false);

	/* Always clear TIF_SINGLESTEP... */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

	/* But touch TF only if it was set by us.. */
	if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
		task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
}