/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */
#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned int op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

	return op;
}
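
/*
 * Note: on ELF ABIv1 a function pointer points at a function
 * descriptor rather than at text, so ppc_function_entry() above is
 * what resolves 'addr' to the real entry point before the relative
 * 'b'/'bl' is encoded.
 */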

static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, because if a bug were to
	 * happen, it could cause us to read or write to someplace that
	 * could cause harm. Carefully read and modify the code with
	 * probe_kernel_*(), and make sure what we read is what we
	 * expected it to be before modifying it.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old) {
		pr_err("%p: replaced (%#x) != old (%#x)",
		       (void *)ip, replaced, old);
		return -EINVAL;
	}

	/* replace the text with the new text */
	if (patch_instruction((unsigned int *)ip, new))
		return -EPERM;

	return 0;
}
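
/*
 * patch_instruction() does more than a plain store: it writes the
 * word and then flushes the icache for that address, so CPUs do not
 * keep executing the stale instruction out of their caches.
 */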

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	addr = ppc_function_entry((void *)addr);

	/* use the create_branch to verify that this offset can be branched */
	return create_branch((unsigned int *)ip, addr, 0);
}
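
/*
 * create_branch() returns 0 when the displacement does not fit in the
 * 24-bit immediate field (roughly +/- 32MB from 'ip'), so the encoded
 * instruction doubles as the in-range predicate here.
 */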

#ifdef CONFIG_MODULES

static int is_bl_op(unsigned int op)
{
	return (op & 0xfc000003) == 0x48000001;
}

static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	offset = (op & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
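
/*
 * Decoding notes: an I-form branch is opcode(6) | LI(24) | AA | LK.
 * Masking with 0xfc000003 keeps the opcode plus the AA and LK bits,
 * so 0x48000001 matches 'bl' (opcode 18, relative, with link). The
 * LI field sits at bits 2-25 of the word, so (op & 0x03fffffc) above
 * is already the byte displacement, with sign bit 0x02000000.
 */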

#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	unsigned int op, pop;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

#ifdef CC_USING_MPROFILE_KERNEL
	/* When using -mprofile-kernel there is no load to jump over */
	pop = PPC_INST_NOP;

	if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
		pr_err("Fetching instruction at %lx failed.\n", ip - 4);
		return -EFAULT;
	}

	/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
	if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
		pr_err("Unexpected instruction %08x around bl _mcount\n", op);
		return -EINVAL;
	}
#else
	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we cannot simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
	pop = PPC_INST_BRANCH | 8;	/* b +8 */

	/*
	 * Check what is in the next instruction. We can see ld r2,40(r1), but
	 * on first pass after boot we will see mflr r0.
	 */
	if (probe_kernel_read(&op, (void *)(ip + 4), MCOUNT_INSN_SIZE)) {
		pr_err("Fetching op failed.\n");
		return -EFAULT;
	}

	if (op != PPC_INST_LD_TOC) {
		pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
		return -EINVAL;
	}
#endif /* CC_USING_MPROFILE_KERNEL */

	if (patch_instruction((unsigned int *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
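
/*
 * For reference, the two call-site shapes handled above (a sketch of
 * the typical codegen): with -mprofile-kernel the profiling call is
 *
 *	mflr	r0
 *	bl	_mcount
 *
 * so a plain nop suffices, while the ELFv1 -pg sequence
 *
 *	bl	<tramp>
 *	ld	r2,XX(r1)
 *
 * needs the 'b +8' so the TOC restore stays in place for tasks that
 * were preempted inside the tracer.
 */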
#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 * 0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
	 * 0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
	 * 0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 * 0x4e, 0x80, 0x04, 0x20  bctr
	 */
	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		pr_err("Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		pr_err("Not a trampoline\n");
		return -EINVAL;
	}
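
	/*
	 * Reassemble the target address from the lis/addi pair. The
	 * @ha half compensates for addi sign-extending its immediate:
	 * a set bit 15 in the low half means the high half was rounded
	 * up, so 0x10000 must be subtracted back out.
	 */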
	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		pr_err("Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */
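
/*
 * Entry point used by the ftrace core to turn a compiler-inserted
 * profiling call (bl _mcount) back into a nop, both at boot and when
 * tracing of a function is switched off.
 */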
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = PPC_INST_NOP;
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
/*
 * Examine the existing instructions for __ftrace_make_call.
 * They should effectively be a NOP, and follow formal constraints,
 * depending on the ABI. Return false if they don't.
 */
#ifndef CC_USING_MPROFILE_KERNEL
static int
expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
{
	/*
	 * We expect to see:
	 *
	 * b +8
	 * ld r2,XX(r1)
	 *
	 * The load offset is different depending on the ABI. For simplicity
	 * just mask it out when doing the compare.
	 */
	if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
		return 0;
	return 1;
}
#else
static int
expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
{
	/* look for patched "NOP" on ppc64 with -mprofile-kernel */
	if (op0 != PPC_INST_NOP)
		return 0;
	return 1;
}
#endif
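
/*
 * Decoding the magic numbers above: 0x48000008 is 'b +8' (I-form
 * branch, displacement 8, AA=LK=0) and 0xe841xxxx is 'ld r2,N(r1)',
 * i.e. the TOC restore with its offset masked off.
 */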

static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	void *ip = (void *)rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, ip, sizeof(op)))
		return -EFAULT;

	if (!expected_nop_sequence(ip, op[0], op[1])) {
		pr_err("Unexpected call sequence at %p: %x %x\n",
		       ip, op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		pr_err("Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
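
/*
 * Note the two-step pattern above: create_branch() is used as a dry
 * run that only checks the trampoline is reachable, and patch_branch()
 * then re-encodes the same branch and writes it out.
 */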

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return ftrace_make_call(rec, addr);
}
#endif
#else /* !CONFIG_PPC64 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		pr_err("Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = PPC_INST_NOP;
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Since we are converting from a nop, the module
	 * must already be known.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}
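
/*
 * ftrace_call is the patchable call site inside the arch's
 * ftrace_caller assembly; rewriting it above is what redirects every
 * traced function to the currently registered tracer callback.
 */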

static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
	int ret;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;
	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);
	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		ret = __ftrace_replace_code(rec, enable);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}
}

/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
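
/*
 * Skipping stop_machine() should be safe here because each patch is
 * a single aligned 4-byte store, which other CPUs observe atomically
 * as either the old or the new instruction.
 */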

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);
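
/*
 * ftrace_graph_call is another patchable site in the ftrace_caller
 * assembly: the two helpers below flip its unconditional branch
 * between ftrace_graph_caller (graph tracing on) and
 * ftrace_graph_stub (graph tracing off).
 */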

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info. Return the address we want to divert to.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	struct ftrace_graph_ent trace;
	unsigned long return_hooker;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out;

	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
				     NULL) == -EBUSY)
		goto out;

	parent = return_hooker;
out:
	return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
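/*
 * On ppc64 the syscall table interleaves the native and compat entry
 * points, two pointers per syscall, hence the nr*2 indexing below.
 */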
unsigned long __init arch_syscall_addr(int nr)
{
	return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */

#ifdef PPC64_ELF_ABI_v1
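/*
 * ELF ABIv1 prefixes the text entry symbol of every function with a
 * dot (".sched_clock" vs "sched_clock"), so strip the dot when the
 * pattern being matched does not carry one.
 */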
char *arch_ftrace_match_adjust(char *str, const char *search)
{
	if (str[0] == '.' && search[0] != '.')
		return str + 1;
	else
		return str;
}
#endif /* PPC64_ELF_ABI_v1 */