  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Code for replacing ftrace calls with jumps.
  4. *
  5. * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  6. *
  7. * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
  8. *
  9. * Added function graph tracer code, taken from x86 that was written
  10. * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
  11. *
  12. */
  13. #define pr_fmt(fmt) "ftrace-powerpc: " fmt
  14. #include <linux/spinlock.h>
  15. #include <linux/hardirq.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/module.h>
  18. #include <linux/ftrace.h>
  19. #include <linux/percpu.h>
  20. #include <linux/init.h>
  21. #include <linux/list.h>
  22. #include <asm/asm-prototypes.h>
  23. #include <asm/cacheflush.h>
  24. #include <asm/code-patching.h>
  25. #include <asm/ftrace.h>
  26. #include <asm/syscall.h>
  27. #ifdef CONFIG_DYNAMIC_FTRACE
  28. /*
  29. * We generally only have a single long_branch tramp and at most 2 or 3 plt
  30. * tramps generated. But, we don't use the plt tramps currently. We also allot
  31. * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
  32. * tramps in total. Set aside 8 just to be sure.
  33. */
  34. #define NUM_FTRACE_TRAMPS 8
  35. static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
  36. static unsigned int
  37. ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
  38. {
  39. unsigned int op;
  40. addr = ppc_function_entry((void *)addr);
  41. /* if (link) set op to 'bl' else 'b' */
  42. op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);
  43. return op;
  44. }
/*
 * Safely replace the instruction at @ip: read the site back and only
 * patch it if it still holds @old.  Returns 0 on success, -EFAULT if
 * the site cannot be read, -EINVAL on a mismatch, -EPERM if patching
 * fails.
 */
static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug was to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old) {
		pr_err("%p: replaced (%#x) != old (%#x)",
			(void *)ip, replaced, old);
		return -EINVAL;
	}

	/* replace the text with the new text */
	if (patch_instruction((unsigned int *)ip, new))
		return -EPERM;

	return 0;
}
  70. /*
  71. * Helper functions that are the same for both PPC64 and PPC32.
  72. */
/*
 * Can @ip reach the function entry of @addr with a 24-bit relative
 * branch?  Returns the branch instruction (non-zero) when in range,
 * 0 otherwise.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	addr = ppc_function_entry((void *)addr);

	/* use the create_branch to verify that this offset can be branched */
	return create_branch((unsigned int *)ip, addr, 0);
}
  79. static int is_bl_op(unsigned int op)
  80. {
  81. return (op & 0xfc000003) == 0x48000001;
  82. }
  83. static int is_b_op(unsigned int op)
  84. {
  85. return (op & 0xfc000003) == 0x48000000;
  86. }
  87. static unsigned long find_bl_target(unsigned long ip, unsigned int op)
  88. {
  89. static int offset;
  90. offset = (op & 0x03fffffc);
  91. /* make it signed */
  92. if (offset & 0x02000000)
  93. offset |= 0xfe000000;
  94. return ip + (long)offset;
  95. }
  96. #ifdef CONFIG_MODULES
  97. #ifdef CONFIG_PPC64
/*
 * PPC64: turn the 'bl <tramp>' at a module call site back into a "nop".
 * Verifies the site still branches to a trampoline whose target is
 * @addr before patching.  With -mprofile-kernel the nop is a real NOP;
 * otherwise (ELFv1) it is a 'b +8' that skips the TOC restore.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	unsigned int op, pop;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

#ifdef CONFIG_MPROFILE_KERNEL
	/* When using -mprofile-kernel there is no load to jump over */
	pop = PPC_INST_NOP;

	if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
		pr_err("Fetching instruction at %lx failed.\n", ip - 4);
		return -EFAULT;
	}

	/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
	if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
		pr_err("Unexpected instruction %08x around bl _mcount\n", op);
		return -EINVAL;
	}
#else
	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
	pop = PPC_INST_BRANCH | 8;	/* b +8 */

	/*
	 * Check what is in the next instruction. We can see ld r2,40(r1), but
	 * on first pass after boot we will see mflr r0.
	 */
	if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) {
		pr_err("Fetching op failed.\n");
		return -EFAULT;
	}

	if (op != PPC_INST_LD_TOC) {
		pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
		return -EINVAL;
	}
#endif /* CONFIG_MPROFILE_KERNEL */

	if (patch_instruction((unsigned int *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
  175. #else /* !PPC64 */
/*
 * PPC32: nop out the 'bl <tramp>' at a module call site.  The module
 * trampoline is read back and verified to be the expected
 * lis/addi/mtctr/bctr sequence targeting @addr before patching.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 * 0x3d, 0x80, 0x00, 0x00  lis   r12,sym@ha
	 * 0x39, 0x8c, 0x00, 0x00  addi  r12,r12,sym@l
	 * 0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 * 0x4e, 0x80, 0x04, 0x20  bctr
	 */
	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		pr_err("Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		pr_err("Not a trampoline\n");
		return -EINVAL;
	}

	/* reassemble sym@ha/sym@l into the trampoline's target address */
	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);

	/* the @ha half compensated a negative @l half; undo that here */
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		pr_err("Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
  230. #endif /* PPC64 */
  231. #endif /* CONFIG_MODULES */
/*
 * Find a recorded ftrace trampoline reachable from @ip with a 24-bit
 * relative branch.  Returns 0 if none is in range.
 */
static unsigned long find_ftrace_tramp(unsigned long ip)
{
	int i;

	/*
	 * We have the compiler generated long_branch tramps at the end
	 * and we prefer those
	 */
	for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
		if (!ftrace_tramps[i])
			continue;
		else if (create_branch((void *)ip, ftrace_tramps[i], 0))
			return ftrace_tramps[i];

	return 0;
}
/*
 * Record @tramp in the first free slot of ftrace_tramps[].
 * Returns 0 on success, -1 if the table is full.
 */
static int add_ftrace_tramp(unsigned long tramp)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i]) {
			ftrace_tramps[i] = tramp;
			return 0;
		}

	return -1;
}
  256. /*
  257. * If this is a compiler generated long_branch trampoline (essentially, a
  258. * trampoline that has a branch to _mcount()), we re-write the branch to
  259. * instead go to ftrace_[regs_]caller() and note down the location of this
  260. * trampoline.
  261. */
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
	int i, op;
	unsigned long ptr;
	/* plt tramps we have rejected before; never usable for ftrace */
	static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];

	/* Is this a known long jump tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i])
			break;
		else if (ftrace_tramps[i] == tramp)
			return 0;

	/* Is this a known plt tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_plt_tramps[i])
			break;
		else if (ftrace_plt_tramps[i] == tramp)
			return -1;

	/* New trampoline -- read where this goes */
	if (probe_kernel_read(&op, (void *)tramp, sizeof(int))) {
		pr_debug("Fetching opcode failed.\n");
		return -1;
	}

	/* Is this a 24 bit branch? */
	if (!is_b_op(op)) {
		pr_debug("Trampoline is not a long branch tramp.\n");
		return -1;
	}

	/* lets find where the pointer goes */
	ptr = find_bl_target(tramp, op);

	/* only convert trampolines that branch to _mcount */
	if (ptr != ppc_global_function_entry((void *)_mcount)) {
		pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
		return -1;
	}

	/* Let's re-write the tramp to go to ftrace_[regs_]caller */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
	ptr = ppc_global_function_entry((void *)ftrace_caller);
#endif
	/* confirm the new target is in 24-bit branch range of the tramp */
	if (!create_branch((void *)tramp, ptr, 0)) {
		pr_debug("%ps is not reachable from existing mcount tramp\n",
			 (void *)ptr);
		return -1;
	}

	if (patch_branch((unsigned int *)tramp, ptr, 0)) {
		pr_debug("REL24 out of range!\n");
		return -1;
	}

	if (add_ftrace_tramp(tramp)) {
		pr_debug("No tramp locations left\n");
		return -1;
	}

	return 0;
}
/*
 * NOP out an ftrace call site in kernel text that branched via a
 * trampoline.  The trampoline is converted for ftrace reuse when it is
 * a compiler-generated long branch; otherwise we only require that some
 * other recorded trampoline is still reachable from the site.
 */
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long tramp, ip = rec->ip;
	unsigned int op;

	/* Read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* Let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (setup_mcount_compiler_tramp(tramp)) {
		/* Are other trampolines reachable? */
		if (!find_ftrace_tramp(ip)) {
			pr_err("No ftrace trampolines reachable from %ps\n",
			       (void *)ip);
			return -EINVAL;
		}
	}

	if (patch_instruction((unsigned int *)ip, PPC_INST_NOP)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
/*
 * Patch out the ftrace call at @rec->ip.  In-range sites are patched
 * directly to a NOP; out-of-range kernel text goes through
 * __ftrace_make_nop_kernel(), and module sites through
 * __ftrace_make_nop().  Also caches the owning module in rec->arch.mod
 * on first sight.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = PPC_INST_NOP;
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip))
		return __ftrace_make_nop_kernel(rec, addr);

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
  391. #ifdef CONFIG_MODULES
  392. #ifdef CONFIG_PPC64
  393. /*
  394. * Examine the existing instructions for __ftrace_make_call.
  395. * They should effectively be a NOP, and follow formal constraints,
  396. * depending on the ABI. Return false if they don't.
  397. */
  398. #ifndef CONFIG_MPROFILE_KERNEL
  399. static int
  400. expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
  401. {
  402. /*
  403. * We expect to see:
  404. *
  405. * b +8
  406. * ld r2,XX(r1)
  407. *
  408. * The load offset is different depending on the ABI. For simplicity
  409. * just mask it out when doing the compare.
  410. */
  411. if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
  412. return 0;
  413. return 1;
  414. }
  415. #else
/*
 * With -mprofile-kernel a patched-out call site is a single real NOP;
 * only the first instruction matters.
 */
static int
expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
{
	/* look for patched "NOP" on ppc64 with -mprofile-kernel */
	if (op0 != PPC_INST_NOP)
		return 0;
	return 1;
}
  424. #endif
/*
 * PPC64: turn the nop sequence at a module call site into a 'bl' to the
 * module's ftrace trampoline (the _regs variant when FTRACE_FL_REGS is
 * set).  The trampoline's target is verified to be @addr first.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	void *ip = (void *)rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* read where this goes */
	if (probe_kernel_read(op, ip, sizeof(op)))
		return -EFAULT;

	if (!expected_nop_sequence(ip, op[0], op[1])) {
		pr_err("Unexpected call sequence at %p: %x %x\n",
		       ip, op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up ftrace trampoline(s), then bail */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
	if (!mod->arch.tramp) {
#endif
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
#endif
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (!create_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
  477. #else /* !CONFIG_PPC64: */
/*
 * PPC32: turn the NOP at a module call site into a 'bl' to the module's
 * ftrace trampoline.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		pr_err("Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
  508. #endif /* CONFIG_PPC64 */
  509. #endif /* CONFIG_MODULES */
/*
 * Enable an ftrace call site in kernel text that is out of direct
 * branch range: patch a 'bl' to a reachable recorded trampoline.
 * @addr must be ftrace_caller (or ftrace_regs_caller with REGS).
 */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	void *ip = (void *)rec->ip;
	unsigned long tramp, entry, ptr;

	/* Make sure we're being asked to patch branch to a known ftrace addr */
	entry = ppc_global_function_entry((void *)ftrace_caller);
	ptr = ppc_global_function_entry((void *)addr);

	if (ptr != entry) {
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
		entry = ppc_global_function_entry((void *)ftrace_regs_caller);
		if (ptr != entry) {
#endif
			pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
			return -EINVAL;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
		}
#endif
	}

	/* Make sure we have a nop */
	if (probe_kernel_read(&op, ip, sizeof(op))) {
		pr_err("Unable to read ftrace location %p\n", ip);
		return -EFAULT;
	}

	if (op != PPC_INST_NOP) {
		pr_err("Unexpected call sequence at %p: %x\n", ip, op);
		return -EINVAL;
	}

	tramp = find_ftrace_tramp((unsigned long)ip);
	if (!tramp) {
		pr_err("No ftrace trampolines reachable from %ps\n", ip);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Error patching branch to ftrace tramp!\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * Enable the ftrace call at @rec->ip: replace the NOP with a call to
 * @addr.  In-range sites are patched directly; out-of-range kernel text
 * goes through __ftrace_make_call_kernel(), and module sites through
 * __ftrace_make_call().
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = PPC_INST_NOP;
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip))
		return __ftrace_make_call_kernel(rec, addr);

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
  581. #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
  582. #ifdef CONFIG_MODULES
  583. static int
  584. __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
  585. unsigned long addr)
  586. {
  587. unsigned int op;
  588. unsigned long ip = rec->ip;
  589. unsigned long entry, ptr, tramp;
  590. struct module *mod = rec->arch.mod;
  591. /* If we never set up ftrace trampolines, then bail */
  592. if (!mod->arch.tramp || !mod->arch.tramp_regs) {
  593. pr_err("No ftrace trampoline\n");
  594. return -EINVAL;
  595. }
  596. /* read where this goes */
  597. if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
  598. pr_err("Fetching opcode failed.\n");
  599. return -EFAULT;
  600. }
  601. /* Make sure that that this is still a 24bit jump */
  602. if (!is_bl_op(op)) {
  603. pr_err("Not expected bl: opcode is %x\n", op);
  604. return -EINVAL;
  605. }
  606. /* lets find where the pointer goes */
  607. tramp = find_bl_target(ip, op);
  608. entry = ppc_global_function_entry((void *)old_addr);
  609. pr_devel("ip:%lx jumps to %lx", ip, tramp);
  610. if (tramp != entry) {
  611. /* old_addr is not within range, so we must have used a trampoline */
  612. if (module_trampoline_target(mod, tramp, &ptr)) {
  613. pr_err("Failed to get trampoline target\n");
  614. return -EFAULT;
  615. }
  616. pr_devel("trampoline target %lx", ptr);
  617. /* This should match what was called */
  618. if (ptr != entry) {
  619. pr_err("addr %lx does not match expected %lx\n", ptr, entry);
  620. return -EINVAL;
  621. }
  622. }
  623. /* The new target may be within range */
  624. if (test_24bit_addr(ip, addr)) {
  625. /* within range */
  626. if (patch_branch((unsigned int *)ip, addr, BRANCH_SET_LINK)) {
  627. pr_err("REL24 out of range!\n");
  628. return -EINVAL;
  629. }
  630. return 0;
  631. }
  632. if (rec->flags & FTRACE_FL_REGS)
  633. tramp = mod->arch.tramp_regs;
  634. else
  635. tramp = mod->arch.tramp;
  636. if (module_trampoline_target(mod, tramp, &ptr)) {
  637. pr_err("Failed to get trampoline target\n");
  638. return -EFAULT;
  639. }
  640. pr_devel("trampoline target %lx", ptr);
  641. entry = ppc_global_function_entry((void *)addr);
  642. /* This should match what was called */
  643. if (ptr != entry) {
  644. pr_err("addr %lx does not match expected %lx\n", ptr, entry);
  645. return -EINVAL;
  646. }
  647. /* Ensure branch is within 24 bits */
  648. if (!create_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) {
  649. pr_err("Branch out of range\n");
  650. return -EINVAL;
  651. }
  652. if (patch_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) {
  653. pr_err("REL24 out of range!\n");
  654. return -EINVAL;
  655. }
  656. return 0;
  657. }
  658. #endif
/*
 * Redirect an enabled call site from @old_addr to @addr.  In-range
 * sites are re-patched directly; out-of-range kernel text is already
 * routed to the regs trampoline so nothing is needed; module sites go
 * through __ftrace_modify_call().
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
		/* within range */
		old = ftrace_call_replace(ip, old_addr, 1);
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		/*
		 * We always patch out of range locations to go to the regs
		 * variant, so there is nothing to do here
		 */
		return 0;
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_modify_call(rec, old_addr, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
  695. #endif
/*
 * Point the ftrace_call (and, with REGS, ftrace_regs_call) patch site
 * at the new tracer callback @func.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	/* the current instruction at the site is the expected "old" value */
	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/* Also update the regs callback function */
	if (!ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = *(unsigned int *)&ftrace_regs_call;
		new = ftrace_call_replace(ip, (unsigned long)func, 1);
		ret = ftrace_modify_code(ip, old, new);
	}
#endif

	return ret;
}
  715. /*
  716. * Use the default ftrace_modify_all_code, but without
  717. * stop_machine().
  718. */
void arch_ftrace_update_code(int command)
{
	/* patch_instruction() is safe without stop_machine() here */
	ftrace_modify_all_code(command);
}
  723. #ifdef CONFIG_PPC64
  724. #define PACATOC offsetof(struct paca_struct, kernel_toc)
  725. #define PPC_LO(v) ((v) & 0xffff)
  726. #define PPC_HI(v) (((v) >> 16) & 0xffff)
  727. #define PPC_HA(v) PPC_HI ((v) + 0x8000)
  728. extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
/*
 * Install the two ftrace trampoline stubs (after .text and .init.text).
 * Each stub loads the kernel TOC from the paca, builds the address of
 * ftrace_[regs_]caller as a TOC-relative offset, and branches via CTR.
 * Fails if the caller is farther than +/-2GB from the kernel TOC.
 */
int __init ftrace_dyn_arch_init(void)
{
	int i;
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	u32 stub_insns[] = {
		0xe98d0000 | PACATOC,	/* ld      r12,PACATOC(r13)	*/
		0x3d8c0000,		/* addis   r12,r12,<high>	*/
		0x398c0000,		/* addi    r12,r12,<low>	*/
		0x7d8903a6,		/* mtctr   r12			*/
		0x4e800420,		/* bctr				*/
	};
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
	unsigned long addr = ppc_global_function_entry((void *)ftrace_caller);
#endif
	long reladdr = addr - kernel_toc_addr();

	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("Address of %ps out of range of kernel_toc.\n",
		       (void *)addr);
		return -1;
	}

	for (i = 0; i < 2; i++) {
		memcpy(tramp[i], stub_insns, sizeof(stub_insns));
		/* fill in the high/low halves of the TOC-relative offset */
		tramp[i][1] |= PPC_HA(reladdr);
		tramp[i][2] |= PPC_LO(reladdr);
		add_ftrace_tramp((unsigned long)tramp[i]);
	}

	return 0;
}
  759. #else
/* Nothing to set up when CONFIG_PPC64 is not enabled */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
  764. #endif
  765. #endif /* CONFIG_DYNAMIC_FTRACE */
  766. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  767. extern void ftrace_graph_call(void);
  768. extern void ftrace_graph_stub(void);
/*
 * Enable the function graph tracer: re-aim the branch at
 * ftrace_graph_call from the stub to ftrace_graph_caller.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}
/*
 * Disable the function graph tracer: re-aim the branch at
 * ftrace_graph_call from ftrace_graph_caller back to the stub.
 */
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}
  789. /*
  790. * Hook the return address and push it in the stack of return addrs
  791. * in current thread info. Return the address we want to divert to.
  792. */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	unsigned long return_hooker;

	/* graph tracing disabled globally: leave the return address alone */
	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	/* graph tracing paused for this task: leave the return address alone */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	/* only divert the return if the entry was recorded successfully */
	if (!function_graph_enter(parent, ip, 0, NULL))
		parent = return_hooker;

out:
	return parent;
}
  806. #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  807. #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
unsigned long __init arch_syscall_addr(int nr)
{
	/*
	 * The table is indexed with a stride of 2 entries per syscall
	 * number here; presumably interleaved native/compat handlers --
	 * TODO confirm against the sys_call_table layout.
	 */
	return sys_call_table[nr*2];
}
  812. #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
  813. #ifdef PPC64_ELF_ABI_v1
  814. char *arch_ftrace_match_adjust(char *str, const char *search)
  815. {
  816. if (str[0] == '.' && search[0] != '.')
  817. return str + 1;
  818. else
  819. return str;
  820. }
  821. #endif /* PPC64_ELF_ABI_v1 */