kprobes.c

/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
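
/*
 * stack_addr() returns the stack pointer at the probe point, so that
 * the return address sitting on the stack can be read or rewritten.
 */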
#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
/*
 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
 * Groups, and some special opcodes cannot be boosted.
 * This is non-const to keep gcc from statically optimizing it out, as
 * variable_test_bit makes gcc think only *(unsigned long*) is used.
 */
static u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only current task, but
			      doesn't switch kernel stack.*/
	{NULL, NULL}	/* Terminator */
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
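
/*
 * Emit a 5-byte "opcode + rel32" instruction (e.g. a near jmp or call)
 * at 'from'.  The displacement is computed relative to the end of the
 * 5-byte instruction, i.e. raddr = to - (from + 5).
 */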
static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __attribute__((packed)) *insn;

	insn = (struct __arch_relative_insn *)from;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
static void __kprobes synthesize_reljump(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
}

/*
 * Skip the prefixes of the instruction.
 */
static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
{
	insn_attr_t attr;

	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	while (inat_is_legacy_prefix(attr)) {
		insn++;
		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	}
#ifdef CONFIG_X86_64
	if (inat_is_rex_prefix(attr))
		insn++;
#endif
	return insn;
}

/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copy time in 64-bit mode.
 */
static int __kprobes can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

	if (search_exception_tables((unsigned long)opcodes))
		return 0;	/* Page fault may occur on this address. */

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}

/* Recover the probed instruction at addr for further analysis. */
static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;

	kp = get_kprobe((void *)addr);
	if (!kp)
		return -EINVAL;

	/*
	 * kp->ainsn.insn holds a copy of the original instruction, but
	 * since a RIP-relative instruction cannot be single-stepped at a
	 * different address, __copy_instruction() tweaks its displacement.
	 * In that case we can't recover the instruction from kp->ainsn.insn.
	 *
	 * On the other hand, kp->opcode holds a copy of the first byte of
	 * the probed instruction, which was overwritten by int3.  Since the
	 * instruction at kp->addr is not modified by kprobes except for
	 * that first byte, we can recover the original instruction from it
	 * and kp->opcode.
	 */
	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	buf[0] = kp->opcode;
	return 0;
}

/* Check if paddr is at an instruction boundary */
static int __kprobes can_probe(unsigned long paddr)
{
	int ret;
	unsigned long addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		kernel_insn_init(&insn, (void *)addr);
		insn_get_opcode(&insn);

		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint by the
		 * original instruction in our buffer.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
			ret = recover_probed_instruction(buf, addr);
			if (ret)
				/*
				 * Another debugging subsystem might insert
				 * this breakpoint.  In that case, we can't
				 * recover it.
				 */
				return 0;
			kernel_insn_init(&insn, buf);
		}
		insn_get_length(&insn);
		addr += insn.length;
	}

	return (addr == paddr);
}

/*
 * Returns non-zero if opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	/* Skip prefixes */
	insn = skip_prefixes(insn);

	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	return 0;
}

/*
 * Copy an instruction and adjust the displacement if the instruction
 * uses the %rip-relative addressing mode (only applicable to 64-bit x86).
 * Returns the length of the copied instruction, or 0 if the instruction
 * could not be copied.
 */
static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
{
	struct insn insn;
	int ret;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	kernel_insn_init(&insn, src);
	if (recover) {
		insn_get_opcode(&insn);
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
			ret = recover_probed_instruction(buf,
							 (unsigned long)src);
			if (ret)
				return 0;
			kernel_insn_init(&insn, buf);
		}
	}
	insn_get_length(&insn);
	memcpy(dest, insn.kaddr, insn.length);

#ifdef CONFIG_X86_64
	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		kernel_insn_init(&insn, dest);
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn.displacement.value -
			  (u8 *) dest;
		BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check.  */
		disp = (u8 *) dest + insn_offset_displacement(&insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn.length;
}

static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	/*
	 * Copy an instruction without recovering int3, because it will be
	 * put by another subsystem.
	 */
	__copy_instruction(p->ainsn.insn, p->addr, 0);

	if (can_boost(p->addr))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	p->opcode = *p->addr;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	arch_copy_kprobe(p);
	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
		p->ainsn.insn = NULL;
	}
}
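
/*
 * Save and restore the per-CPU kprobe state, so that a second probe
 * hit while a handler is running (a reentrant hit) can be serviced
 * without losing track of the first one.
 */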
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

static void __kprobes clear_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static void __kprobes restore_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}
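
/*
 * Called on entry to a kretprobed function: stash the real return
 * address in the kretprobe instance and redirect the return to
 * kretprobe_trampoline, which will invoke the user handler.
 */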
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}

#ifdef CONFIG_OPTPROBES
static int __kprobes setup_detour_execution(struct kprobe *p,
					    struct pt_regs *regs,
					    int reenter);
#else
#define setup_detour_execution(p, regs, reenter) (0)
#endif

static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler.  We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
		/*
		 * A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction.  This
		 * entire codepath should strictly reside in the .kprobes.text
		 * section.  Raise a BUG, or we'll continue in an endless
		 * reentering loop and eventually a stack overflow.
		 */
		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
		       p->addr);
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.  We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} else if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		preempt_enable_no_resched();
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}

#ifdef CONFIG_X86_64
#define SAVE_REGS_STRING			\
	/* Skip cs, ip, orig_ax. */		\
	"	subq $24, %rsp\n"		\
	"	pushq %rdi\n"			\
	"	pushq %rsi\n"			\
	"	pushq %rdx\n"			\
	"	pushq %rcx\n"			\
	"	pushq %rax\n"			\
	"	pushq %r8\n"			\
	"	pushq %r9\n"			\
	"	pushq %r10\n"			\
	"	pushq %r11\n"			\
	"	pushq %rbx\n"			\
	"	pushq %rbp\n"			\
	"	pushq %r12\n"			\
	"	pushq %r13\n"			\
	"	pushq %r14\n"			\
	"	pushq %r15\n"
#define RESTORE_REGS_STRING			\
	"	popq %r15\n"			\
	"	popq %r14\n"			\
	"	popq %r13\n"			\
	"	popq %r12\n"			\
	"	popq %rbp\n"			\
	"	popq %rbx\n"			\
	"	popq %r11\n"			\
	"	popq %r10\n"			\
	"	popq %r9\n"			\
	"	popq %r8\n"			\
	"	popq %rax\n"			\
	"	popq %rcx\n"			\
	"	popq %rdx\n"			\
	"	popq %rsi\n"			\
	"	popq %rdi\n"			\
	/* Skip orig_ax, ip, cs */		\
	"	addq $24, %rsp\n"
#else
#define SAVE_REGS_STRING			\
	/* Skip cs, ip, orig_ax and gs. */	\
	"	subl $16, %esp\n"		\
	"	pushl %fs\n"			\
	"	pushl %es\n"			\
	"	pushl %ds\n"			\
	"	pushl %eax\n"			\
	"	pushl %ebp\n"			\
	"	pushl %edi\n"			\
	"	pushl %esi\n"			\
	"	pushl %edx\n"			\
	"	pushl %ecx\n"			\
	"	pushl %ebx\n"
#define RESTORE_REGS_STRING			\
	"	popl %ebx\n"			\
	"	popl %ecx\n"			\
	"	popl %edx\n"			\
	"	popl %esi\n"			\
	"	popl %edi\n"			\
	"	popl %ebp\n"			\
	"	popl %eax\n"			\
	/* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\
	"	addl $24, %esp\n"
#endif

/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
static void __used __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile (
			".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			SAVE_REGS_STRING
			"	movq %rsp, %rdi\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			RESTORE_REGS_STRING
			"	popfq\n"
#else
			"	pushf\n"
			SAVE_REGS_STRING
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* Move flags to cs */
			"	movl 56(%esp), %edx\n"
			"	movl %edx, 52(%esp)\n"
			/* Replace saved flags with true return address. */
			"	movl %eax, 56(%esp)\n"
			RESTORE_REGS_STRING
			"	popf\n"
#endif
			"	ret\n");
}

/*
 * Called from kretprobe_trampoline
 */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the (chronologically) first instance's ret_addr
	 *       will be the real return address, and all the rest will
	 *       point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return (void *)orig_ret_address;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip prefixes */
	insn = skip_prefixes(insn);

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct.  And this is boostable
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * This instruction can be executed directly if it
			 * jumps back to the correct address.
			 */
			synthesize_reljump((void *)regs->ip,
				(void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault.  We reset the current
		 * kprobe so that the ip points back to the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_flags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting; we can
		 * also use the npre/npostfault counts for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault.  This could happen if
		 * the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user().  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs)) {
			/*
			 * Reset the BS bit in dr6 (pointed to by args->err)
			 * to denote completion of processing.
			 */
			(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
			ret = NOTIFY_STOP;
		}
		break;
	case DIE_GPF:
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
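
/*
 * Jprobe support: on entry, save the registers and enough of the stack
 * to cover the argument area, then divert execution to the jprobe's
 * entry function.  jprobe_return() below switches back to the saved
 * stack and triggers an int3 so that longjmp_break_handler() can
 * restore the original state.
 */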
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);
	return 1;
}

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile (
#ifdef CONFIG_X86_64
			"	xchg %%rbx,%%rsp	\n"
#else
			"	xchgl %%ebx,%%esp	\n"
#endif
			"	int3			\n"
			"	.globl jprobe_return_end\n"
			"	jprobe_return_end:	\n"
			"	nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), kcb->jprobe_saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
		       kcb->jprobes_stack,
		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}

#ifdef CONFIG_OPTPROBES

/* Insert a call instruction at address 'from', which calls address 'to'.*/
static void __kprobes synthesize_relcall(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
					  unsigned long val)
{
#ifdef CONFIG_X86_64
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}

static void __used __kprobes kprobes_optinsn_template_holder(void)
{
	asm volatile (
			".global optprobe_template_entry\n"
			"optprobe_template_entry: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			SAVE_REGS_STRING
			"	movq %rsp, %rsi\n"
			".global optprobe_template_val\n"
			"optprobe_template_val: \n"
			ASM_NOP5
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call: \n"
			ASM_NOP5
			/* Move flags to rsp */
			"	movq 144(%rsp), %rdx\n"
			"	movq %rdx, 152(%rsp)\n"
			RESTORE_REGS_STRING
			/* Skip flags entry */
			"	addq $8, %rsp\n"
			"	popfq\n"
#else /* CONFIG_X86_32 */
			"	pushf\n"
			SAVE_REGS_STRING
			"	movl %esp, %edx\n"
			".global optprobe_template_val\n"
			"optprobe_template_val: \n"
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call: \n"
			ASM_NOP5
			RESTORE_REGS_STRING
			"	addl $4, %esp\n"	/* skip cs */
			"	popf\n"
#endif
			".global optprobe_template_end\n"
			"optprobe_template_end: \n");
}

#define TMPL_MOVE_IDX \
	((long)&optprobe_template_val - (long)&optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)&optprobe_template_call - (long)&optprobe_template_entry)
#define TMPL_END_IDX \
	((long)&optprobe_template_end - (long)&optprobe_template_entry)

#define INT3_SIZE sizeof(kprobe_opcode_t)

/* Optimized kprobe call back function: called from optinsn */
static void __kprobes optimized_callback(struct optimized_kprobe *op,
					 struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	local_irq_save(flags);
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		/* Save skipped registers */
#ifdef CONFIG_X86_64
		regs->cs = __KERNEL_CS;
#else
		regs->cs = __KERNEL_CS | get_kernel_rpl();
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	local_irq_restore(flags);
}

static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
{
	int len = 0, ret;

	while (len < RELATIVEJUMP_SIZE) {
		ret = __copy_instruction(dest + len, src + len, 1);
		if (!ret || !can_boost(dest + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

/* Check whether insn is indirect jump */
static int __kprobes insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}

/* Check whether insn jumps into specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}

/* Decode the whole function to ensure no instructions jump into the target */
static int __kprobes can_optimize(unsigned long paddr)
{
	int ret;
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling.
	 */
	if ((paddr >= (unsigned long)__entry_text_start) &&
	    (paddr <  (unsigned long)__entry_text_end))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < RELATIVEJUMP_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		if (search_exception_tables(addr))
			/*
			 * Since some fixup code jumps into this function,
			 * we can't optimize a kprobe in this function.
			 */
			return 0;
		kernel_insn_init(&insn, (void *)addr);
		insn_get_opcode(&insn);
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
			ret = recover_probed_instruction(buf, addr);
			if (ret)
				return 0;
			kernel_insn_init(&insn, buf);
		}
		insn_get_length(&insn);
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check that no instruction jumps into the target */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
					 RELATIVE_ADDR_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

/* Check that the optimized_kprobe can actually be optimized. */
int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}

/* Check whether addr is within the optimized instructions. */
int __kprobes arch_within_optimized_kprobe(struct optimized_kprobe *op,
					   unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static __kprobes
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy the target instructions that will be replaced by the jump.
 * Target instructions MUST be relocatable (checked inside).
 */
int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
{
	u8 *buf;
	int ret;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	op->optinsn.insn = get_optinsn_slot();
	if (!op->optinsn.insn)
		return -ENOMEM;

	/*
	 * Verify that the address gap is within the 2GB range, because
	 * this uses a relative jump.
	 */
	rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
	if (abs(rel) > 0x7fffffff)
		return -ERANGE;

	buf = (u8 *)op->optinsn.insn;

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
	if (ret < 0) {
		__arch_remove_optimized_kprobe(op, 0);
		return ret;
	}
	op->optinsn.size = ret;

	/* Copy arch-dep-instance from template */
	memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);

	/* Set returning jmp instruction at the tail of out-of-line buffer */
	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
			   (u8 *)op->kp.addr + op->optinsn.size);

	flush_icache_range((unsigned long) buf,
			   (unsigned long) buf + TMPL_END_IDX +
			   op->optinsn.size + RELATIVEJUMP_SIZE);
	return 0;
}

#define MAX_OPTIMIZE_PROBES 256
static struct text_poke_param *jump_poke_params;
static struct jump_poke_buffer {
	u8 buf[RELATIVEJUMP_SIZE];
} *jump_poke_bufs;
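
/*
 * Fill in a text_poke parameter that overwrites the probe's int3 (and
 * the displacement bytes that follow it) with a relative jump to the
 * out-of-line optimized buffer, backing up the bytes being replaced.
 */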
static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
					    u8 *insn_buf,
					    struct optimized_kprobe *op)
{
	s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + RELATIVEJUMP_SIZE));

	/* Backup instructions which will be replaced by jump address */
	memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
	       RELATIVE_ADDR_SIZE);

	insn_buf[0] = RELATIVEJUMP_OPCODE;
	*(s32 *)(&insn_buf[1]) = rel;

	tprm->addr = op->kp.addr;
	tprm->opcode = insn_buf;
	tprm->len = RELATIVEJUMP_SIZE;
}

/*
 * Replace breakpoints (int3) with relative jumps.
 * Caller must hold kprobe_mutex and text_mutex.
 */
void __kprobes arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	int c = 0;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		WARN_ON(kprobe_disabled(&op->kp));
		/* Setup param */
		setup_optimize_kprobe(&jump_poke_params[c],
				      jump_poke_bufs[c].buf, op);
		list_del_init(&op->list);
		if (++c >= MAX_OPTIMIZE_PROBES)
			break;
	}

	/*
	 * text_poke_smp doesn't support NMI/MCE code modifying.
	 * However, since kprobes itself also doesn't support NMI/MCE
	 * code probing, it's not a problem.
	 */
	text_poke_smp_batch(jump_poke_params, c);
}

static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
					      u8 *insn_buf,
					      struct optimized_kprobe *op)
{
	/* Set int3 to first byte for kprobes */
	insn_buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);

	tprm->addr = op->kp.addr;
	tprm->opcode = insn_buf;
	tprm->len = RELATIVEJUMP_SIZE;
}

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;
	int c = 0;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/* Setup param */
		setup_unoptimize_kprobe(&jump_poke_params[c],
					jump_poke_bufs[c].buf, op);
		list_move(&op->list, done_list);
		if (++c >= MAX_OPTIMIZE_PROBES)
			break;
	}

	/*
	 * text_poke_smp doesn't support NMI/MCE code modifying.
	 * However, since kprobes itself also doesn't support NMI/MCE
	 * code probing, it's not a problem.
	 */
	text_poke_smp_batch(jump_poke_params, c);
}

/* Replace a relative jump with a breakpoint (int3).  */
void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 buf[RELATIVEJUMP_SIZE];

	/* Set int3 to first byte for kprobes */
	buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
}

static int __kprobes setup_detour_execution(struct kprobe *p,
					    struct pt_regs *regs,
					    int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}

static int __kprobes init_poke_params(void)
{
	/* Allocate code buffer and parameter array */
	jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
				 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
	if (!jump_poke_bufs)
		return -ENOMEM;

	jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
				   MAX_OPTIMIZE_PROBES, GFP_KERNEL);
	if (!jump_poke_params) {
		kfree(jump_poke_bufs);
		jump_poke_bufs = NULL;
		return -ENOMEM;
	}

	return 0;
}
#else	/* !CONFIG_OPTPROBES */
static int __kprobes init_poke_params(void)
{
	return 0;
}
#endif

int __init arch_init_kprobes(void)
{
	return init_poke_params();
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}