/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>

#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX	\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX	\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX	\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX	\
	(optprobe_template_end - optprobe_template_entry)
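
/*
 * Sketch of the detour buffer layout that these indices describe (the
 * template itself lives in optprobes_head.S; this summary is an
 * illustration, not authoritative):
 *
 *	optprobe_template_entry:	save GPRs, build a pt_regs frame
 *	optprobe_template_op_address:	load &optimized_kprobe into r3
 *	optprobe_template_call_handler:	bl optimized_callback
 *	optprobe_template_insn:		load the probed instruction into r4
 *	optprobe_template_call_emulate:	bl emulate_step
 *	optprobe_template_ret:		b <nip> (patched in at prepare time)
 *	optprobe_template_end:
 */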

DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;

static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}

struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
	.nr_garbage = 0,
};
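
/*
 * Note: DEFINE_INSN_CACHE_OPS(ppc_optinsn) above expands to the
 * get_ppc_optinsn_slot()/free_ppc_optinsn_slot() helpers used below,
 * thin wrappers around __get_insn_slot()/__free_insn_slot() operating
 * on kprobe_ppc_optinsn_slots.
 */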

/*
 * Check if we can optimize this probe. Returns the NIP post-emulation
 * if this probe can be optimized, and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;

	/*
	 * The kprobe placed on the kretprobe trampoline at boot time
	 * sits on a 'nop' instruction, which can always be emulated,
	 * so further checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, not module
	 * addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we can't predict the NIP in advance with a
	 * dummy pt_regs, and so can't ensure that the return branch
	 * from the detour buffer falls within the branch range
	 * (i.e. +/-32MB). The branch back from the trampoline is set
	 * up in the detour buffer to the NIP returned by
	 * analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch,
	 * and that it can be emulated.
	 */
	if (!is_conditional_branch(*p->ainsn.insn) &&
			analyse_instr(&op, &regs, *p->ainsn.insn))
		nip = regs.nip;

	return nip;
}
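
/*
 * Entered from the detour buffer: per the template sketch above, the
 * template has built a pt_regs frame and loaded 'op' (r3) and a pointer
 * to that frame (r4) before the 'bl' patched in at TMPL_CALL_HDLR_IDX.
 */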
static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	local_irq_save(flags);
	hard_irq_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	/*
	 * No need for an explicit __hard_irq_enable() here.
	 * local_irq_restore() will re-enable interrupts if they
	 * were hard disabled.
	 */
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

/*
 * emulate_step() expects the instruction to be emulated as its second
 * parameter, so load register 'r4' (the second argument register) with
 * the instruction.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	*addr++ = PPC_INST_ADDIS | ___PPC_RT(4) |
		  ((val >> 16) & 0xffff);

	/* ori r4,r4,(insn)@l */
	*addr = PPC_INST_ORI | ___PPC_RA(4) | ___PPC_RS(4) |
		(val & 0xffff);
}
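
/*
 * Worked example (illustrative): for val = 0x38600001 ("li r3,1"),
 * the two instructions patched at 'addr' are:
 *
 *	addis	r4,0,0x3860	; r4 = 0x38600000
 *	ori	r4,r4,0x0001	; r4 = 0x38600001
 *
 * The low 32 bits of r4 always end up as 'val'; any sign extension of
 * the addis immediate is irrelevant for a 32-bit instruction image.
 */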

/*
 * Generate instructions to load the provided immediate 64-bit value
 * into register 'r3', and patch these instructions at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
	/* lis r3,(op)@highest */
	*addr++ = PPC_INST_ADDIS | ___PPC_RT(3) |
		  ((val >> 48) & 0xffff);

	/* ori r3,r3,(op)@higher */
	*addr++ = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
		  ((val >> 32) & 0xffff);

	/* rldicr r3,r3,32,31 */
	*addr++ = PPC_INST_RLDICR | ___PPC_RA(3) | ___PPC_RS(3) |
		  __PPC_SH64(32) | __PPC_ME64(31);

	/* oris r3,r3,(op)@h */
	*addr++ = PPC_INST_ORIS | ___PPC_RA(3) | ___PPC_RS(3) |
		  ((val >> 16) & 0xffff);

	/* ori r3,r3,(op)@l */
	*addr = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
		(val & 0xffff);
}
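
/*
 * Worked example (illustrative): for val = 0xc000000012345678, the
 * five instructions emitted above compute:
 *
 *	lis	r3,0xc000	; r3 = 0xffffffffc0000000 (sign-extended)
 *	ori	r3,r3,0x0000	; bits 32-47 of val: nothing to OR in here
 *	rldicr	r3,r3,32,31	; rotate left 32, clear the low 32 bits:
 *				; r3 = 0xc000000000000000
 *	oris	r3,r3,0x1234	; r3 = 0xc000000012340000
 *	ori	r3,r3,0x5678	; r3 = 0xc000000012345678
 *
 * The rldicr places the upper 32 bits correctly regardless of the sign
 * extension done by lis.
 */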

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
	long b_offset;
	unsigned long nip;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses a 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively nearby, to permit use
	 * of the branch instruction in powerpc, because the address is
	 * specified in an immediate field in the instruction opcode
	 * itself, i.e. 24 bits in the opcode specify the address.
	 * Therefore the address should be within 32MB on either side of
	 * the current instruction.
	 */
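	/*
	 * For reference, is_offset_in_branch_range() (in code-patching.c)
	 * accepts a signed offset that fits in the 26-bit LI||0b00 field
	 * of the I-form branch; a sketch of the check, under that
	 * assumption:
	 *
	 *	offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3)
	 */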
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
			(unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Setup template */
	memcpy(buff, optprobe_template_entry,
	       TMPL_END_IDX * sizeof(kprobe_opcode_t));

	/*
	 * Fix up the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	kprobe_lookup_name("optimized_callback", op_callback_addr);
	kprobe_lookup_name("emulate_step", emulate_step_addr);
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "kprobe_lookup_name() failed\n");
		goto error;
	}

	branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
				(unsigned long)op_callback_addr,
				BRANCH_SET_LINK);

	branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
				(unsigned long)emulate_step_addr,
				BRANCH_SET_LINK);

	if (!branch_op_callback || !branch_emulate_step)
		goto error;

	buff[TMPL_CALL_HDLR_IDX] = branch_op_callback;
	buff[TMPL_EMULATE_IDX] = branch_emulate_step;

	/*
	 * 3. load the instruction to be emulated into the relevant
	 *    register, and
	 */
	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from the trampoline
	 */
	buff[TMPL_RET_IDX] = create_branch((unsigned int *)buff + TMPL_RET_IDX,
				(unsigned long)nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces exactly one instruction
 * (4 bytes aligned and 4 bytes long). It is impossible to encounter
 * another kprobe in this address range, so always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Back up the instruction that will be replaced by
		 * the branch to the detour buffer.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
		       RELATIVEJUMP_SIZE);
		patch_instruction(op->kp.addr,
			create_branch((unsigned int *)op->kp.addr,
				      (unsigned long)op->optinsn.insn, 0));
		list_del_init(&op->list);
	}
}
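
/*
 * Illustration (hypothetical addresses): for a probe at
 * 0xc000000000abc000 with its detour buffer at 0xc000000000e00000, the
 * patched instruction is 'b +0x344000', well within the +/-32MB range
 * validated in arch_prepare_optimized_kprobe().
 */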

void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}
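
/*
 * Usage sketch (hypothetical, not part of this file): no optprobe-
 * specific API is exposed to users. A plain kprobe is registered as
 * usual, and the generic kprobes core transparently upgrades it to an
 * optprobe via the arch hooks above when can_optimize() allows it:
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "do_nanosleep",	// assumed probe point
 *	};
 *
 *	ret = register_kprobe(&kp);	// may later be optimized
 */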