/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
        struct optimized_kprobe *op;
        struct kprobe *kp;
        long offs;
        int i;

        for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
                kp = get_kprobe((void *)addr - i);
                /* This function only handles jump-optimized kprobes */
                if (kp && kprobe_optimized(kp)) {
                        op = container_of(kp, struct optimized_kprobe, kp);
                        /* If op->list is not empty, op is under optimizing */
                        if (list_empty(&op->list))
                                goto found;
                }
        }

        return addr;
found:
        /*
         * If the kprobe is optimized, the original bytes at addr may have
         * been overwritten by the jump destination address.  In that case,
         * the original bytes must be recovered from the
         * op->optinsn.copied_insn buffer.
         */
        if (probe_kernel_read(buf, (void *)addr,
                              MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
                return 0UL;

        if (addr == (unsigned long)kp->addr) {
                /* The first byte holds the int3; restore the saved opcode */
                buf[0] = kp->opcode;
                memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
        } else {
                offs = addr - (unsigned long)kp->addr - 1;
                memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
        }

        return (unsigned long)buf;
}
/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
        *addr++ = 0x48;         /* REX.W prefix */
        *addr++ = 0xbf;         /* movabs $imm64, %rdi */
#else
        *addr++ = 0xb8;         /* mov $imm32, %eax */
#endif
        *(unsigned long *)addr = val;
}
asm (
        "optprobe_template_func:\n"
        ".global optprobe_template_entry\n"
        "optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
        /* We don't bother saving the ss register */
        "       pushq %rsp\n"
        "       pushfq\n"
        SAVE_REGS_STRING
        "       movq %rsp, %rsi\n"
        ".global optprobe_template_val\n"
        "optprobe_template_val:\n"
        ASM_NOP5
        ASM_NOP5
        ".global optprobe_template_call\n"
        "optprobe_template_call:\n"
        ASM_NOP5
        /* Move flags to rsp */
        "       movq 144(%rsp), %rdx\n"
        "       movq %rdx, 152(%rsp)\n"
        RESTORE_REGS_STRING
        /* Skip flags entry */
        "       addq $8, %rsp\n"
        "       popfq\n"
#else /* CONFIG_X86_32 */
        "       pushf\n"
        SAVE_REGS_STRING
        "       movl %esp, %edx\n"
        ".global optprobe_template_val\n"
        "optprobe_template_val:\n"
        ASM_NOP5
        ".global optprobe_template_call\n"
        "optprobe_template_call:\n"
        ASM_NOP5
        RESTORE_REGS_STRING
        "       addl $4, %esp\n"        /* skip cs */
        "       popf\n"
#endif
        ".global optprobe_template_end\n"
        "optprobe_template_end:\n"
        ".type optprobe_template_func, @function\n"
        ".size optprobe_template_func, .-optprobe_template_func\n");

void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);
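
/*
 * Rough layout of the template above, as it is patched by
 * arch_prepare_optimized_kprobe() below: optprobe_template_val marks the
 * nop window that is overwritten with a "load &op into the first argument
 * register" (synthesize_set_arg1()), optprobe_template_call marks the nop
 * that becomes a relative call to optimized_callback(), and
 * optprobe_template_end marks where the copied original instructions are
 * appended.  The TMPL_*_IDX macros below compute those offsets.
 */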
#define TMPL_MOVE_IDX \
        ((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
        ((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
        ((long)optprobe_template_end - (long)optprobe_template_entry)

#define INT3_SIZE sizeof(kprobe_opcode_t)
/* Optimized kprobe callback function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
        /* This is possible if op is under delayed unoptimizing */
        if (kprobe_disabled(&op->kp))
                return;

        preempt_disable();
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(&op->kp);
        } else {
                struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
                /* Save skipped registers */
#ifdef CONFIG_X86_64
                regs->cs = __KERNEL_CS;
#else
                regs->cs = __KERNEL_CS | get_kernel_rpl();
                regs->gs = 0;
#endif
                regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
                regs->orig_ax = ~0UL;

                __this_cpu_write(current_kprobe, &op->kp);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                opt_pre_handler(&op->kp, regs);
                __this_cpu_write(current_kprobe, NULL);
        }
        preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);
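
/*
 * Copy whole instructions from @src into @dest until at least
 * RELATIVEJUMP_SIZE bytes are covered, so the relative jump never splits
 * an instruction.  Each copied instruction must be boostable, i.e.
 * acceptable to can_boost() for out-of-line execution from the detour
 * buffer.
 */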
static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
        struct insn insn;
        int len = 0, ret;

        while (len < RELATIVEJUMP_SIZE) {
                ret = __copy_instruction(dest + len, src + len, real, &insn);
                if (!ret || !can_boost(&insn, src + len))
                        return -EINVAL;
                len += ret;
        }

        /* Check whether the address range is reserved */
        if (ftrace_text_reserved(src, src + len - 1) ||
            alternatives_text_reserved(src, src + len - 1) ||
            jump_label_text_reserved(src, src + len - 1))
                return -EBUSY;

        return len;
}
/* Check whether insn is an indirect jump */
static int __insn_is_indirect_jump(struct insn *insn)
{
        return ((insn->opcode.bytes[0] == 0xff &&
                (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
                insn->opcode.bytes[0] == 0xea); /* Segment based jump */
}

/* Check whether insn jumps into the specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
        unsigned long target = 0;

        switch (insn->opcode.bytes[0]) {
        case 0xe0:      /* loopne */
        case 0xe1:      /* loope */
        case 0xe2:      /* loop */
        case 0xe3:      /* jcxz */
        case 0xe9:      /* near relative jump */
        case 0xeb:      /* short relative jump */
                break;
        case 0x0f:
                if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
                        break;
                return 0;
        default:
                if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
                        break;
                return 0;
        }
        target = (unsigned long)insn->next_byte + insn->immediate.value;

        return (start <= target && target <= start + len);
}
static int insn_is_indirect_jump(struct insn *insn)
{
        int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
        /*
         * A jump to __x86_indirect_thunk_* is treated as an indirect jump.
         * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with an
         * older gcc may still emit indirect jumps, so this check is added
         * on top of the indirect-jump check above rather than replacing it.
         */
        if (!ret)
                ret = insn_jump_into_range(insn,
                                (unsigned long)__indirect_thunk_start,
                                (unsigned long)__indirect_thunk_end -
                                (unsigned long)__indirect_thunk_start);
#endif
        return ret;
}
/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
        unsigned long addr, size = 0, offset = 0;
        struct insn insn;
        kprobe_opcode_t buf[MAX_INSN_SIZE];

        /* Lookup symbol including addr */
        if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
                return 0;

        /*
         * Do not optimize in the entry code due to the unstable
         * stack handling and registers setup.
         */
        if (((paddr >= (unsigned long)__entry_text_start) &&
             (paddr < (unsigned long)__entry_text_end)) ||
            ((paddr >= (unsigned long)__irqentry_text_start) &&
             (paddr < (unsigned long)__irqentry_text_end)))
                return 0;

        /* Check there is enough space for a relative jump. */
        if (size - offset < RELATIVEJUMP_SIZE)
                return 0;

        /* Decode instructions */
        addr = paddr - offset;
        while (addr < paddr - offset + size) { /* Decode until function end */
                unsigned long recovered_insn;

                if (search_exception_tables(addr))
                        /*
                         * Since some fixup code will jump into this
                         * function, we can't optimize a kprobe in it.
                         */
                        return 0;
                recovered_insn = recover_probed_instruction(buf, addr);
                if (!recovered_insn)
                        return 0;
                kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
                insn_get_length(&insn);
                /* Another subsystem puts a breakpoint */
                if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
                        return 0;
                /* Recover address */
                insn.kaddr = (void *)addr;
                insn.next_byte = (void *)(addr + insn.length);
                /* Check that no instruction jumps into the target */
                if (insn_is_indirect_jump(&insn) ||
                    insn_jump_into_range(&insn, paddr + INT3_SIZE,
                                         RELATIVE_ADDR_SIZE))
                        return 0;
                addr += insn.length;
        }

        return 1;
}
/* Check whether the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
        int i;
        struct kprobe *p;

        for (i = 1; i < op->optinsn.size; i++) {
                p = get_kprobe(op->kp.addr + i);
                if (p && !kprobe_disabled(p))
                        return -EEXIST;
        }

        return 0;
}

/* Check whether the addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
                                 unsigned long addr)
{
        return ((unsigned long)op->kp.addr <= addr &&
                (unsigned long)op->kp.addr + op->optinsn.size > addr);
}
/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
        if (op->optinsn.insn) {
                free_optinsn_slot(op->optinsn.insn, dirty);
                op->optinsn.insn = NULL;
                op->optinsn.size = 0;
        }
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
        __arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy and relocate the instructions that the jump will replace.
 * Target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
                                  struct kprobe *__unused)
{
        u8 *buf = NULL, *slot;
        int ret, len;
        long rel;

        if (!can_optimize((unsigned long)op->kp.addr))
                return -EILSEQ;

        buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        op->optinsn.insn = slot = get_optinsn_slot();
        if (!slot) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Verify that the address gap is within the 2GB range, because
         * this uses a relative jump.
         */
        rel = (long)slot - (long)op->kp.addr + RELATIVEJUMP_SIZE;
        if (abs(rel) > 0x7fffffff) {
                ret = -ERANGE;
                goto err;
        }

        /* Copy arch-dep-instance from template */
        memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

        /* Copy instructions into the out-of-line buffer */
        ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
                                          slot + TMPL_END_IDX);
        if (ret < 0)
                goto err;
        op->optinsn.size = ret;
        len = TMPL_END_IDX + op->optinsn.size;

        /* Set probe information */
        synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

        /* Set probe function call */
        synthesize_relcall(buf + TMPL_CALL_IDX,
                           slot + TMPL_CALL_IDX, optimized_callback);

        /* Set returning jmp instruction at the tail of out-of-line buffer */
        synthesize_reljump(buf + len, slot + len,
                           (u8 *)op->kp.addr + op->optinsn.size);
        len += RELATIVEJUMP_SIZE;

        /* We have to use text_poke() for the instruction buffer because it is RO */
        text_poke(slot, buf, len);
        ret = 0;
out:
        kfree(buf);
        return ret;

err:
        __arch_remove_optimized_kprobe(op, 0);
        goto out;
}
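
/*
 * Note: after arch_prepare_optimized_kprobe() succeeds, the detour slot
 * roughly contains, in order: the template (save regs, load &op as the
 * first argument, call optimized_callback(), restore regs), the copied
 * original instructions (op->optinsn.size bytes), and a relative jump
 * back to op->kp.addr + op->optinsn.size.
 */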
/*
 * Replace breakpoints (int3) with relative jumps.
 * Caller must hold kprobe_mutex and text_mutex.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
        struct optimized_kprobe *op, *tmp;
        u8 insn_buf[RELATIVEJUMP_SIZE];

        list_for_each_entry_safe(op, tmp, oplist, list) {
                s32 rel = (s32)((long)op->optinsn.insn -
                        ((long)op->kp.addr + RELATIVEJUMP_SIZE));

                WARN_ON(kprobe_disabled(&op->kp));

                /* Backup instructions which will be replaced by jump address */
                memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
                       RELATIVE_ADDR_SIZE);

                insn_buf[0] = RELATIVEJUMP_OPCODE;
                *(s32 *)(&insn_buf[1]) = rel;

                text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
                             op->optinsn.insn);

                list_del_init(&op->list);
        }
}
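
/*
 * Note on text_poke_bp(), used above and in arch_unoptimize_kprobe()
 * below: it patches live code by first installing an int3 at the first
 * byte, then writing the remaining bytes, and finally the new first byte;
 * its last argument is where a CPU that hits the transient int3 is
 * redirected in the meantime (here, the detour buffer).
 */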
/* Replace a relative jump with a breakpoint (int3). */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
        u8 insn_buf[RELATIVEJUMP_SIZE];

        /* Set int3 to first byte for kprobes */
        insn_buf[0] = BREAKPOINT_INSTRUCTION;
        memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
        text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
                     op->optinsn.insn);
}

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
                                    struct list_head *done_list)
{
        struct optimized_kprobe *op, *tmp;

        list_for_each_entry_safe(op, tmp, oplist, list) {
                arch_unoptimize_kprobe(op);
                list_move(&op->list, done_list);
        }
}
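
/*
 * Note: reached from kprobes' int3 handling path.  If the CPU hit the
 * breakpoint while the probe is (or is becoming) optimized, divert
 * execution into the detour buffer past the template header, so the
 * copied original instructions run out of line.
 */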
int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
        struct optimized_kprobe *op;

        if (p->flags & KPROBE_FLAG_OPTIMIZED) {
                /* This kprobe is really able to run optimized path. */
                op = container_of(p, struct optimized_kprobe, kp);
                /* Detour through copied instructions */
                regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
                if (!reenter)
                        reset_current_kprobe();
                return 1;
        }

        return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);