/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/sections.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);

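/*
 * Replace the single instruction at @addr with @opcode via the arm64
 * text-patching helper, which takes care of cache maintenance and
 * serialization against other CPUs.
 */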
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
	void *addrs[1];
	u32 insns[1];

	addrs[0] = addr;
	insns[0] = opcode;

	return aarch64_insn_patch_text(addrs, insns, 1);
}

static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	/* prepare insn slot */
	patch_text(p->ainsn.api.insn, p->opcode);

	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
			   (uintptr_t) (p->ainsn.api.insn) +
			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	/*
	 * The return address needs restoring after stepping the
	 * instruction out of line (XOL): point it at the instruction
	 * following the probe point.
	 */
	p->ainsn.api.restore = (unsigned long) p->addr +
		sizeof(kprobe_opcode_t);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/*
	 * This instruction is not executed out of line (XOL); there is
	 * no need to adjust the PC afterwards.
	 */
	p->ainsn.api.restore = 0;
}

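/*
 * Simulate an instruction that cannot be executed out of line, then run
 * the usual post-step processing.
 */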
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(kcb, regs);
}

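/*
 * Validate and prepare a probe: reject misaligned, exception-text and
 * rodata addresses, then decode the instruction to decide between
 * out-of-line single-stepping and simulation.
 */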
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;
	extern char __start_rodata[];
	extern char __end_rodata[];

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	if (in_exception_text(probe_addr))
		return -EINVAL;
	if (probe_addr >= (unsigned long) __start_rodata &&
	    probe_addr <= (unsigned long) __end_rodata)
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}

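/*
 * Allocate the page backing the XOL instruction slots. It is mapped
 * executable and then made read-only; subsequent writes to the slots go
 * through patch_text() rather than this mapping.
 */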
void *alloc_insn_page(void)
{
	void *page;

	page = vmalloc_exec(PAGE_SIZE);
	if (page)
		set_memory_ro((unsigned long)page, 1);

	return page;
}

/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, BRK64_OPCODE_KPROBES);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.api.insn) {
		free_insn_slot(p->ainsn.api.insn, 0);
		p->ainsn.api.insn = NULL;
	}
}

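/* Save and restore per-CPU kprobe state across a nested (re-entrant) hit. */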
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * When PSTATE.D is set (masked), software step exceptions cannot be
 * generated.
 * SPSR's D bit shows the value of PSTATE.D immediately before the
 * exception was taken: PSTATE.D is set on entry to any exception mode,
 * but software clears it in the exception entry for any normal
 * (non-debug-exception) mode.  Therefore, when we enter the kprobe
 * breakpoint handler from a normal mode, SPSR.D is already clear; it is
 * set only when we enter from a debug exception mode.
 * Since we always need to generate a single step exception after a kprobe
 * breakpoint exception, we clear the bit unconditionally once we are sure
 * the current breakpoint exception belongs to a kprobe.
 */
static void __kprobes
spsr_set_debug_flag(struct pt_regs *regs, int mask)
{
	unsigned long spsr = regs->pstate;

	if (mask)
		spsr |= PSR_D_BIT;
	else
		spsr &= ~PSR_D_BIT;

	regs->pstate = spsr;
}

/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, an interrupt could fire
 * between the exception return and the start of the out-of-line single
 * step, and we would wrongly single-step into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						 struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate;
	regs->pstate |= PSR_I_BIT;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						    struct pt_regs *regs)
{
	if (kcb->saved_irqflag & PSR_I_BIT)
		regs->pstate |= PSR_I_BIT;
	else
		regs->pstate &= ~PSR_I_BIT;
}

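/*
 * Mark a single step as pending and record the address at which it is
 * expected to complete: the instruction following the XOL slot.
 */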
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
}

static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}

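/*
 * Start handling a probe hit: either single-step the copy of the probed
 * instruction in its XOL slot, or simulate the instruction directly on
 * @regs.
 */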
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		set_ss_context(kcb, slot);	/* mark pending ss */
		spsr_set_debug_flag(regs, 0);

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		kernel_enable_single_step(regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

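/*
 * Handle a breakpoint hit while another kprobe is already being processed
 * on this CPU, e.g. a probe placed inside a kprobe handler.
 */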
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

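/*
 * Finish a probe hit after the single step or simulation: restore the PC,
 * pop any nested probe state and invoke the post handler.
 */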
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* restore the return address if this was a non-branching insn */
	if (cur->ainsn.api.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.api.restore);

	/* restore the previously saved kprobe state and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/*
		 * The post_handler can hit a breakpoint and single-step
		 * again, so we enable the D-flag for the recursive
		 * exception.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}

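/* Called from the fault path when a fault is taken while a probe is active. */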
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single stepped
		 * caused a page fault.  We reset the current kprobe, point
		 * the ip back to the probe address and allow the page fault
		 * handler to continue as a normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		if (!instruction_pointer(regs))
			BUG();

		kernel_disable_single_step();

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting; the
		 * npre/npostfault counts could also be used to account for
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused the page fault.  This could happen if the
		 * handler accesses user space via copy_from_user(),
		 * get_user(), etc.  Let the user-specified fault handler
		 * try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;

		/*
		 * If the user-specified fault handler returned zero,
		 * try to fix up the fault.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}

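/*
 * Main entry point from the BRK exception: look up the kprobe registered
 * at the faulting PC and dispatch it.
 */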
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler, or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it has
			 * modified the execution path and there is no need
			 * to single-step; just reset the current kprobe
			 * and exit.
			 *
			 * The pre_handler may itself hit a breakpoint and
			 * single-step before returning, so keep the PSTATE
			 * D-flag set until the pre_handler returns.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				setup_singlestep(p, regs, kcb, 0);
			} else
				reset_current_kprobe();
		}
	}

	/*
	 * The breakpoint instruction was removed right after we hit it.
	 * Another cpu has removed either a probepoint or a debugger
	 * breakpoint at this address.  In either case, no further handling
	 * of this interrupt is appropriate.  Return to the original
	 * instruction and continue.
	 */
}

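/* Check whether a single-step exception at @addr is the one we set up. */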
static int __kprobes
kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	if ((kcb->ss_ctx.ss_pending)
	    && (kcb->ss_ctx.match_addr == addr)) {
		clear_ss_context(kcb);	/* clear pending ss */
		return DBG_HOOK_HANDLED;
	}
	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}

int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int retval;

	/* return error if this is not our step */
	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

	if (retval == DBG_HOOK_HANDLED) {
		kprobes_restore_local_irqflag(kcb, regs);
		kernel_disable_single_step();

		post_kprobe_handler(kcb, regs);
	}

	return retval;
}

int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
	kprobe_handler(regs);
	return DBG_HOOK_HANDLED;
}

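/*
 * Regions that may never be probed: kprobes' own text, the low-level
 * entry and idmap text, anything with an exception-table entry and, when
 * the kernel does not run at EL2, the hyp text.
 */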
bool arch_within_kprobe_blacklist(unsigned long addr)
{
	if ((addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end) ||
	    (addr >= (unsigned long)__entry_text_start &&
	    addr < (unsigned long)__entry_text_end) ||
	    (addr >= (unsigned long)__idmap_text_start &&
	    addr < (unsigned long)__idmap_text_end) ||
	    !!search_exception_tables(addr))
		return true;

	if (!is_kernel_in_hyp_mode()) {
		if ((addr >= (unsigned long)__hyp_text_start &&
		    addr < (unsigned long)__hyp_text_end) ||
		    (addr >= (unsigned long)__hyp_idmap_text_start &&
		    addr < (unsigned long)__hyp_idmap_text_end))
			return true;
	}

	return false;
}

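/*
 * Called when a kretprobed function returns through kretprobe_trampoline:
 * find the instance(s) belonging to the current task, run their handlers
 * and hand back the real return address.
 */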
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		(unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the (chronologically) first instance's ret_addr
	 *       will be the real return address, and all the rest will
	 *       point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return (void *)orig_ret_address;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}
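
/*
 * Illustrative usage (a minimal sketch, not part of this source file):
 * the generic kprobes API drives the arch hooks above.  register_kprobe()
 * ends up in arch_prepare_kprobe() and arch_arm_kprobe(); a hit then
 * flows through kprobe_breakpoint_handler() and, for a stepped insn,
 * kprobe_single_step_handler().  The probed symbol below is an arbitrary
 * example, not something this file mandates.
 *
 *	#include <linux/module.h>
 *	#include <linux/kprobes.h>
 *
 *	static int demo_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at %pS, pc=0x%llx\n", p->addr, regs->pc);
 *		return 0;	// 0: continue with single-step/simulation
 *	}
 *
 *	static struct kprobe demo_kp = {
 *		.symbol_name	= "do_sys_open",	// example target
 *		.pre_handler	= demo_pre,
 *	};
 *
 *	static int __init demo_init(void)
 *	{
 *		return register_kprobe(&demo_kp);
 *	}
 *
 *	static void __exit demo_exit(void)
 *	{
 *		unregister_kprobe(&demo_kp);
 *	}
 *
 *	module_init(demo_init);
 *	module_exit(demo_exit);
 *	MODULE_LICENSE("GPL");
 */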