kprobes.c

/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <linux/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

bool arch_within_kprobe_blacklist(unsigned long addr)
{
	return  (addr >= (unsigned long)__kprobes_text_start &&
		 addr < (unsigned long)__kprobes_text_end) ||
		(addr >= (unsigned long)_stext &&
		 addr < (unsigned long)__head_end);
}

kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
	kprobe_opcode_t *addr;

#ifdef PPC64_ELF_ABI_v2
	/* PPC64 ABIv2 needs local entry point */
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	if (addr && !offset) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		unsigned long faddr;
		/*
		 * Per livepatch.h, ftrace location is always within the first
		 * 16 bytes of a function on powerpc with -mprofile-kernel.
		 */
		faddr = ftrace_location_range((unsigned long)addr,
					      (unsigned long)addr + 16);
		if (faddr)
			addr = (kprobe_opcode_t *)faddr;
		else
#endif
			addr = (kprobe_opcode_t *)ppc_function_entry(addr);
	}
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * 64bit powerpc ABIv1 uses function descriptors:
	 * - Check for the dot variant of the symbol first.
	 * - If that fails, try looking up the symbol provided.
	 *
	 * This ensures we always get to the actual symbol and not
	 * the descriptor.
	 *
	 * Also handle <module:symbol> format.
	 */
	char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
	const char *modsym;
	bool dot_appended = false;

	if ((modsym = strchr(name, ':')) != NULL) {
		modsym++;
		if (*modsym != '\0' && *modsym != '.') {
			/* Convert to <module:.symbol> */
			strncpy(dot_name, name, modsym - name);
			dot_name[modsym - name] = '.';
			dot_name[modsym - name + 1] = '\0';
			strncat(dot_name, modsym,
				sizeof(dot_name) - (modsym - name) - 2);
			dot_appended = true;
		} else {
			dot_name[0] = '\0';
			strncat(dot_name, name, sizeof(dot_name) - 1);
		}
	} else if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strncat(dot_name, name, KSYM_NAME_LEN - 2);
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strncat(dot_name, name, KSYM_NAME_LEN - 1);
	}
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended) {
		/* Let's try the original non-dot symbol lookup */
		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	}
#else
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif

	return addr;
}
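
/*
 * Illustrative sketch, not part of the original file: how the lookup above
 * rewrites names before calling kallsyms_lookup_name() on an ABIv1 kernel.
 * The symbol names are examples only.
 *
 *	kprobe_lookup_name("vfs_read", 0);	      // looks up ".vfs_read"
 *	kprobe_lookup_name("ext4:ext4_sync_fs", 0);  // looks up "ext4:.ext4_sync_fs"
 *	kprobe_lookup_name(".vfs_read", 0);	      // passed through unchanged
 *
 * On ABIv2 there are no descriptors; "vfs_read" instead resolves to the
 * function's local entry point (typically the global entry point + 8, or
 * the ftrace location when CONFIG_KPROBES_ON_FTRACE is enabled).
 */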

int arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	}

	/*
	 * insn must be on a special executable page on ppc64. This is
	 * not explicitly required on ppc32 (right now), but it doesn't hurt.
	 */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		memcpy(p->ainsn.insn, p->addr,
		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
		flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	}

	p->ainsn.boostable = 0;
	return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

void arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	enable_single_step(regs);

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant, as values in regs could play a part in
	 * whether the trap is taken or not.
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}

static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_msr = regs->msr;
}

bool arch_function_offset_within_entry(unsigned long offset)
{
#ifdef PPC64_ELF_ABI_v2
#ifdef CONFIG_KPROBES_ON_FTRACE
	return offset <= 16;
#else
	return offset <= 8;
#endif
#else
	return !offset;
#endif
}

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;

	/* Replace the return addr with trampoline addr */
	regs->link = (unsigned long)kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	/* regs->nip is also adjusted if emulate_step() returns 1 */
	ret = emulate_step(regs, insn);
	if (ret > 0) {
		/*
		 * Once this instruction has been boosted
		 * successfully, set the boostable flag.
		 */
		if (unlikely(p->ainsn.boostable == 0))
			p->ainsn.boostable = 1;
	} else if (ret < 0) {
		/*
		 * We don't allow kprobes on mtmsr[d]/rfi[d], etc.
		 * So, we should never get here... but, it's still
		 * good to catch them, just in case...
		 */
		printk("Can't step on instruction %x\n", insn);
		BUG();
	} else if (ret == 0)
		/* This instruction can't be boosted */
		p->ainsn.boostable = -1;

	return ret;
}
NOKPROBE_SYMBOL(try_to_emulate);
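
/*
 * Descriptive note, not in the original sources: ainsn.boostable acts as a
 * small three-state cache around emulate_step(), and kprobe_handler() below
 * consults it (boostable >= 0) to decide whether emulation is worth trying:
 *
 *	 0 -> not tried yet (fresh probe, set in arch_prepare_kprobe())
 *	 1 -> emulate_step() succeeded once; later hits can skip single-stepping
 *	-1 -> emulate_step() returned 0; always fall back to single-stepping
 *
 * The ret < 0 branch fires only for instructions that should have been
 * rejected at registration time, hence the BUG().
 */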

int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check that we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				/* Turn off 'trace' bits */
				regs->msr &= ~MSR_SINGLESTEP;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			kcb->kprobe_status = KPROBE_REENTER;
			if (p->ainsn.boostable >= 0) {
				ret = try_to_emulate(p, regs);
				if (ret > 0) {
					restore_previous_kprobe(kcb);
					preempt_enable_no_resched();
					return 1;
				}
			}
			prepare_singlestep(p, regs);
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If it's a trap variant, it doesn't belong to us */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/*
				 * The breakpoint instruction was removed by
				 * another cpu right after we hit it; no further
				 * handling of this interrupt is appropriate.
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				if (!skip_singlestep(p, regs, kcb))
					goto ss_probe;
				ret = 1;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else.
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let the kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable >= 0) {
		ret = try_to_emulate(p, regs);

		if (ret > 0) {
			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		}
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
asm(".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
	"nop\n"
	"blr\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n");

/*
 * Called when the probe at the kretprobe trampoline is hit.
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->nip = orig_ret_address;
	/*
	 * Make LR point to the orig_ret_address.
	 * When the 'nop' inside the kretprobe_trampoline
	 * is optimized, we can do a 'blr' after executing the
	 * detour buffer code.
	 */
	regs->link = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption).
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);
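
/*
 * Illustrative usage sketch, not part of this file: a minimal kretprobe
 * that exercises the trampoline above. The probed symbol "do_fork" and the
 * handler names are examples only; on powerpc the function's return value
 * is in r3 (regs->gpr[3]) when the return probe fires.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("returned %lu\n", regs->gpr[3]);  // r3 = return value
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler = my_ret_handler,
 *		.kp.symbol_name = "do_fork",
 *	};
 *
 *	// in module init: register_kretprobe(&my_rp);
 */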

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction that has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
int kprobe_post_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur || user_mode(regs))
		return 0;

	/* make sure we got here for an instruction we have a kprobe on */
	if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Adjust nip to point past the single-stepped instruction */
	regs->nip = (unsigned long)cur->addr + 4;
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, msr
	 * will have DE/SE set, in which case, continue the remaining
	 * processing of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SINGLESTEP)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_post_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and point nip back to the probe address,
		 * then allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts can also be used for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = extable_fixup(entry);
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

unsigned long arch_deref_entry_point(void *entry)
{
	return ppc_global_function_entry(entry);
}
NOKPROBE_SYMBOL(arch_deref_entry_point);

int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->nip = arch_deref_entry_point(jp->entry);
#ifdef PPC64_ELF_ABI_v2
	regs->gpr[12] = (unsigned long)jp->entry;
#elif defined(PPC64_ELF_ABI_v1)
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

void __used jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}
NOKPROBE_SYMBOL(jprobe_return);

static void __used jprobe_return_end(void)
{
}
NOKPROBE_SYMBOL(jprobe_return_end);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here because
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}
NOKPROBE_SYMBOL(longjmp_break_handler);
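
/*
 * Illustrative usage sketch, not part of this file: a jprobe handler runs
 * with the probed function's arguments (so its signature must match the
 * probed function) and must end with jprobe_return(), whose trap brings us
 * into longjmp_break_handler() above to restore the saved regs. The symbol
 * and handler names are examples only.
 *
 *	static long my_jprobe_handler(const char *name, int flags)
 *	{
 *		pr_info("called with %s, %d\n", name, flags);
 *		jprobe_return();	// never returns normally
 *		return 0;		// unreachable; satisfies the compiler
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = my_jprobe_handler,
 *		.kp.symbol_name = "example_function",
 *	};
 *
 *	// in module init: register_jprobe(&my_jp);
 */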

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
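
/*
 * Illustrative end-to-end sketch, not part of this file: a minimal module
 * that drives the handlers above. Registering the probe arms a trap at the
 * probed address (arch_arm_kprobe()); when it fires, kprobe_handler() runs
 * the pre_handler and then either emulates or single-steps the saved copy
 * of the original instruction. The probed symbol is an example only.
 *
 *	#include <linux/module.h>
 *	#include <linux/kprobes.h>
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit at nip=0x%lx\n", regs->nip);
 *		return 0;	// 0: let kprobes single-step/emulate as usual
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name = "vfs_read",
 *		.pre_handler = my_pre,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return register_kprobe(&my_kp);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		unregister_kprobe(&my_kp);
 *	}
 *
 *	module_init(my_init);
 *	module_exit(my_exit);
 *	MODULE_LICENSE("GPL");
 */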