trace_kprobe.c

  1. /*
  2. * Kprobes-based tracing events
  3. *
  4. * Created by Masami Hiramatsu <mhiramat@redhat.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. #include <linux/module.h>
  20. #include <linux/uaccess.h>
  21. #include "trace_probe.h"
  22. #define KPROBE_EVENT_SYSTEM "kprobes"
  23. /**
  24. * Kprobe event core functions
  25. */
  26. struct trace_kprobe {
  27. struct list_head list;
  28. struct kretprobe rp; /* Use rp.kp for kprobe use */
  29. unsigned long nhit;
  30. const char *symbol; /* symbol name */
  31. struct trace_probe tp;
  32. };
  33. struct event_file_link {
  34. struct ftrace_event_file *file;
  35. struct list_head list;
  36. };
  37. #define SIZEOF_TRACE_KPROBE(n) \
  38. (offsetof(struct trace_kprobe, tp.args) + \
  39. (sizeof(struct probe_arg) * (n)))
  40. static __kprobes bool trace_kprobe_is_return(struct trace_kprobe *tk)
  41. {
  42. return tk->rp.handler != NULL;
  43. }
  44. static __kprobes const char *trace_kprobe_symbol(struct trace_kprobe *tk)
  45. {
  46. return tk->symbol ? tk->symbol : "unknown";
  47. }
  48. static __kprobes unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
  49. {
  50. return tk->rp.kp.offset;
  51. }
  52. static __kprobes bool trace_kprobe_has_gone(struct trace_kprobe *tk)
  53. {
  54. return !!(kprobe_gone(&tk->rp.kp));
  55. }
  56. static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk,
  57. struct module *mod)
  58. {
  59. int len = strlen(mod->name);
  60. const char *name = trace_kprobe_symbol(tk);
  61. return strncmp(mod->name, name, len) == 0 && name[len] == ':';
  62. }
  63. static __kprobes bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
  64. {
  65. return !!strchr(trace_kprobe_symbol(tk), ':');
  66. }
  67. static int register_kprobe_event(struct trace_kprobe *tk);
  68. static int unregister_kprobe_event(struct trace_kprobe *tk);
  69. static DEFINE_MUTEX(probe_lock);
  70. static LIST_HEAD(probe_list);
  71. static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
  72. static int kretprobe_dispatcher(struct kretprobe_instance *ri,
  73. struct pt_regs *regs);
  74. /* Memory fetching by symbol */
  75. struct symbol_cache {
  76. char *symbol;
  77. long offset;
  78. unsigned long addr;
  79. };
  80. unsigned long update_symbol_cache(struct symbol_cache *sc)
  81. {
  82. sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
  83. if (sc->addr)
  84. sc->addr += sc->offset;
  85. return sc->addr;
  86. }
  87. void free_symbol_cache(struct symbol_cache *sc)
  88. {
  89. kfree(sc->symbol);
  90. kfree(sc);
  91. }
  92. struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
  93. {
  94. struct symbol_cache *sc;
  95. if (!sym || strlen(sym) == 0)
  96. return NULL;
  97. sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
  98. if (!sc)
  99. return NULL;
  100. sc->symbol = kstrdup(sym, GFP_KERNEL);
  101. if (!sc->symbol) {
  102. kfree(sc);
  103. return NULL;
  104. }
  105. sc->offset = offset;
  106. update_symbol_cache(sc);
  107. return sc;
  108. }
  109. /*
  110. * Kprobes-specific fetch functions
  111. */
  112. #define DEFINE_FETCH_stack(type) \
  113. static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
  114. void *offset, void *dest) \
  115. { \
  116. *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \
  117. (unsigned int)((unsigned long)offset)); \
  118. }
  119. DEFINE_BASIC_FETCH_FUNCS(stack)
  120. /* No string on the stack entry */
  121. #define fetch_stack_string NULL
  122. #define fetch_stack_string_size NULL
  123. #define DEFINE_FETCH_memory(type) \
  124. static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
  125. void *addr, void *dest) \
  126. { \
  127. type retval; \
  128. if (probe_kernel_address(addr, retval)) \
  129. *(type *)dest = 0; \
  130. else \
  131. *(type *)dest = retval; \
  132. }
  133. DEFINE_BASIC_FETCH_FUNCS(memory)
  134. /*
  135. * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
  136. * length and relative data location.
  137. */
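  /*
   * Sketch of the encoding this relies on (assumed from the
   * make_data_rloc()/get_rloc_len()/get_rloc_offs() helpers in
   * trace_probe.h, not spelled out here): the u32 at *dest packs the
   * maximum string length in its upper 16 bits and the offset of the
   * string data, relative to the entry, in its lower 16 bits. On return
   * the length field is rewritten with the number of bytes actually
   * copied (or 0 on failure), while the offset is preserved.
   */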
  138. static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
  139. void *addr, void *dest)
  140. {
  141. long ret;
  142. int maxlen = get_rloc_len(*(u32 *)dest);
  143. u8 *dst = get_rloc_data(dest);
  144. u8 *src = addr;
  145. mm_segment_t old_fs = get_fs();
  146. if (!maxlen)
  147. return;
  148. /*
  149. * Try to get string again, since the string can be changed while
  150. * probing.
  151. */
  152. set_fs(KERNEL_DS);
  153. pagefault_disable();
  154. do
  155. ret = __copy_from_user_inatomic(dst++, src++, 1);
  156. while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
  157. dst[-1] = '\0';
  158. pagefault_enable();
  159. set_fs(old_fs);
  160. if (ret < 0) { /* Failed to fetch string */
  161. ((u8 *)get_rloc_data(dest))[0] = '\0';
  162. *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
  163. } else {
  164. *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
  165. get_rloc_offs(*(u32 *)dest));
  166. }
  167. }
  168. /* Return the length of the string -- including the terminating null byte */
  169. static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
  170. void *addr, void *dest)
  171. {
  172. mm_segment_t old_fs;
  173. int ret, len = 0;
  174. u8 c;
  175. old_fs = get_fs();
  176. set_fs(KERNEL_DS);
  177. pagefault_disable();
  178. do {
  179. ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
  180. len++;
  181. } while (c && ret == 0 && len < MAX_STRING_SIZE);
  182. pagefault_enable();
  183. set_fs(old_fs);
  184. if (ret < 0) /* Failed to check the length */
  185. *(u32 *)dest = 0;
  186. else
  187. *(u32 *)dest = len;
  188. }
  189. #define DEFINE_FETCH_symbol(type) \
  190. __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, \
  191. void *data, void *dest) \
  192. { \
  193. struct symbol_cache *sc = data; \
  194. if (sc->addr) \
  195. fetch_memory_##type(regs, (void *)sc->addr, dest); \
  196. else \
  197. *(type *)dest = 0; \
  198. }
  199. DEFINE_BASIC_FETCH_FUNCS(symbol)
  200. DEFINE_FETCH_symbol(string)
  201. DEFINE_FETCH_symbol(string_size)
  202. /* kprobes don't support file_offset fetch methods */
  203. #define fetch_file_offset_u8 NULL
  204. #define fetch_file_offset_u16 NULL
  205. #define fetch_file_offset_u32 NULL
  206. #define fetch_file_offset_u64 NULL
  207. #define fetch_file_offset_string NULL
  208. #define fetch_file_offset_string_size NULL
  209. /* Fetch type information table */
  210. const struct fetch_type kprobes_fetch_type_table[] = {
  211. /* Special types */
  212. [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
  213. sizeof(u32), 1, "__data_loc char[]"),
  214. [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
  215. string_size, sizeof(u32), 0, "u32"),
  216. /* Basic types */
  217. ASSIGN_FETCH_TYPE(u8, u8, 0),
  218. ASSIGN_FETCH_TYPE(u16, u16, 0),
  219. ASSIGN_FETCH_TYPE(u32, u32, 0),
  220. ASSIGN_FETCH_TYPE(u64, u64, 0),
  221. ASSIGN_FETCH_TYPE(s8, u8, 1),
  222. ASSIGN_FETCH_TYPE(s16, u16, 1),
  223. ASSIGN_FETCH_TYPE(s32, u32, 1),
  224. ASSIGN_FETCH_TYPE(s64, u64, 1),
  225. ASSIGN_FETCH_TYPE_END
  226. };
  227. /*
  228. * Allocate new trace_probe and initialize it (including kprobes).
  229. */
  230. static struct trace_kprobe *alloc_trace_kprobe(const char *group,
  231. const char *event,
  232. void *addr,
  233. const char *symbol,
  234. unsigned long offs,
  235. int nargs, bool is_return)
  236. {
  237. struct trace_kprobe *tk;
  238. int ret = -ENOMEM;
  239. tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
  240. if (!tk)
  241. return ERR_PTR(ret);
  242. if (symbol) {
  243. tk->symbol = kstrdup(symbol, GFP_KERNEL);
  244. if (!tk->symbol)
  245. goto error;
  246. tk->rp.kp.symbol_name = tk->symbol;
  247. tk->rp.kp.offset = offs;
  248. } else
  249. tk->rp.kp.addr = addr;
  250. if (is_return)
  251. tk->rp.handler = kretprobe_dispatcher;
  252. else
  253. tk->rp.kp.pre_handler = kprobe_dispatcher;
  254. if (!event || !is_good_name(event)) {
  255. ret = -EINVAL;
  256. goto error;
  257. }
  258. tk->tp.call.class = &tk->tp.class;
  259. tk->tp.call.name = kstrdup(event, GFP_KERNEL);
  260. if (!tk->tp.call.name)
  261. goto error;
  262. if (!group || !is_good_name(group)) {
  263. ret = -EINVAL;
  264. goto error;
  265. }
  266. tk->tp.class.system = kstrdup(group, GFP_KERNEL);
  267. if (!tk->tp.class.system)
  268. goto error;
  269. INIT_LIST_HEAD(&tk->list);
  270. INIT_LIST_HEAD(&tk->tp.files);
  271. return tk;
  272. error:
  273. kfree(tk->tp.call.name);
  274. kfree(tk->symbol);
  275. kfree(tk);
  276. return ERR_PTR(ret);
  277. }
  278. static void free_trace_kprobe(struct trace_kprobe *tk)
  279. {
  280. int i;
  281. for (i = 0; i < tk->tp.nr_args; i++)
  282. traceprobe_free_probe_arg(&tk->tp.args[i]);
  283. kfree(tk->tp.call.class->system);
  284. kfree(tk->tp.call.name);
  285. kfree(tk->symbol);
  286. kfree(tk);
  287. }
  288. static struct trace_kprobe *find_trace_kprobe(const char *event,
  289. const char *group)
  290. {
  291. struct trace_kprobe *tk;
  292. list_for_each_entry(tk, &probe_list, list)
  293. if (strcmp(tk->tp.call.name, event) == 0 &&
  294. strcmp(tk->tp.call.class->system, group) == 0)
  295. return tk;
  296. return NULL;
  297. }
  298. /*
  299. * Enable trace_probe
  300. * If the file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
  301. */
  302. static int
  303. enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
  304. {
  305. int ret = 0;
  306. if (file) {
  307. struct event_file_link *link;
  308. link = kmalloc(sizeof(*link), GFP_KERNEL);
  309. if (!link) {
  310. ret = -ENOMEM;
  311. goto out;
  312. }
  313. link->file = file;
  314. list_add_tail_rcu(&link->list, &tk->tp.files);
  315. tk->tp.flags |= TP_FLAG_TRACE;
  316. } else
  317. tk->tp.flags |= TP_FLAG_PROFILE;
  318. if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
  319. if (trace_kprobe_is_return(tk))
  320. ret = enable_kretprobe(&tk->rp);
  321. else
  322. ret = enable_kprobe(&tk->rp.kp);
  323. }
  324. out:
  325. return ret;
  326. }
  327. static struct event_file_link *
  328. find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
  329. {
  330. struct event_file_link *link;
  331. list_for_each_entry(link, &tp->files, list)
  332. if (link->file == file)
  333. return link;
  334. return NULL;
  335. }
  336. /*
  337. * Disable trace_probe
  338. * If the file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
  339. */
  340. static int
  341. disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
  342. {
  343. struct event_file_link *link = NULL;
  344. int wait = 0;
  345. int ret = 0;
  346. if (file) {
  347. link = find_event_file_link(&tk->tp, file);
  348. if (!link) {
  349. ret = -EINVAL;
  350. goto out;
  351. }
  352. list_del_rcu(&link->list);
  353. wait = 1;
  354. if (!list_empty(&tk->tp.files))
  355. goto out;
  356. tk->tp.flags &= ~TP_FLAG_TRACE;
  357. } else
  358. tk->tp.flags &= ~TP_FLAG_PROFILE;
  359. if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
  360. if (trace_kprobe_is_return(tk))
  361. disable_kretprobe(&tk->rp);
  362. else
  363. disable_kprobe(&tk->rp.kp);
  364. wait = 1;
  365. }
  366. out:
  367. if (wait) {
  368. /*
  369. * Synchronize with kprobe_trace_func/kretprobe_trace_func
  370. * to ensure disabled (all running handlers are finished).
  371. * This is not only for kfree(), but also the caller,
  372. * trace_remove_event_call() supposes it for releasing
  373. * event_call related objects, which will be accessed in
  374. * the kprobe_trace_func/kretprobe_trace_func.
  375. */
  376. synchronize_sched();
  377. kfree(link); /* Ignored if link == NULL */
  378. }
  379. return ret;
  380. }
  381. /* Internal register function - just handle k*probes and flags */
  382. static int __register_trace_kprobe(struct trace_kprobe *tk)
  383. {
  384. int i, ret;
  385. if (trace_probe_is_registered(&tk->tp))
  386. return -EINVAL;
  387. for (i = 0; i < tk->tp.nr_args; i++)
  388. traceprobe_update_arg(&tk->tp.args[i]);
  389. /* Set/clear disabled flag according to tp->flag */
  390. if (trace_probe_is_enabled(&tk->tp))
  391. tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
  392. else
  393. tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
  394. if (trace_kprobe_is_return(tk))
  395. ret = register_kretprobe(&tk->rp);
  396. else
  397. ret = register_kprobe(&tk->rp.kp);
  398. if (ret == 0)
  399. tk->tp.flags |= TP_FLAG_REGISTERED;
  400. else {
  401. pr_warning("Could not insert probe at %s+%lu: %d\n",
  402. trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
  403. if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
  404. pr_warning("This probe might be able to register after"
  405. "target module is loaded. Continue.\n");
  406. ret = 0;
  407. } else if (ret == -EILSEQ) {
  408. pr_warning("Probing address(0x%p) is not an "
  409. "instruction boundary.\n",
  410. tk->rp.kp.addr);
  411. ret = -EINVAL;
  412. }
  413. }
  414. return ret;
  415. }
  416. /* Internal unregister function - just handle k*probes and flags */
  417. static void __unregister_trace_kprobe(struct trace_kprobe *tk)
  418. {
  419. if (trace_probe_is_registered(&tk->tp)) {
  420. if (trace_kprobe_is_return(tk))
  421. unregister_kretprobe(&tk->rp);
  422. else
  423. unregister_kprobe(&tk->rp.kp);
  424. tk->tp.flags &= ~TP_FLAG_REGISTERED;
  425. /* Cleanup kprobe for reuse */
  426. if (tk->rp.kp.symbol_name)
  427. tk->rp.kp.addr = NULL;
  428. }
  429. }
  430. /* Unregister a trace_probe and probe_event: call with locking probe_lock */
  431. static int unregister_trace_kprobe(struct trace_kprobe *tk)
  432. {
  433. /* Enabled event can not be unregistered */
  434. if (trace_probe_is_enabled(&tk->tp))
  435. return -EBUSY;
  436. /* Will fail if probe is being used by ftrace or perf */
  437. if (unregister_kprobe_event(tk))
  438. return -EBUSY;
  439. __unregister_trace_kprobe(tk);
  440. list_del(&tk->list);
  441. return 0;
  442. }
  443. /* Register a trace_probe and probe_event */
  444. static int register_trace_kprobe(struct trace_kprobe *tk)
  445. {
  446. struct trace_kprobe *old_tk;
  447. int ret;
  448. mutex_lock(&probe_lock);
  449. /* Delete the old event with the same name, if it exists */
  450. old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system);
  451. if (old_tk) {
  452. ret = unregister_trace_kprobe(old_tk);
  453. if (ret < 0)
  454. goto end;
  455. free_trace_kprobe(old_tk);
  456. }
  457. /* Register new event */
  458. ret = register_kprobe_event(tk);
  459. if (ret) {
  460. pr_warning("Failed to register probe event(%d)\n", ret);
  461. goto end;
  462. }
  463. /* Register k*probe */
  464. ret = __register_trace_kprobe(tk);
  465. if (ret < 0)
  466. unregister_kprobe_event(tk);
  467. else
  468. list_add_tail(&tk->list, &probe_list);
  469. end:
  470. mutex_unlock(&probe_lock);
  471. return ret;
  472. }
  473. /* Module notifier call back, checking event on the module */
  474. static int trace_kprobe_module_callback(struct notifier_block *nb,
  475. unsigned long val, void *data)
  476. {
  477. struct module *mod = data;
  478. struct trace_kprobe *tk;
  479. int ret;
  480. if (val != MODULE_STATE_COMING)
  481. return NOTIFY_DONE;
  482. /* Update probes on coming module */
  483. mutex_lock(&probe_lock);
  484. list_for_each_entry(tk, &probe_list, list) {
  485. if (trace_kprobe_within_module(tk, mod)) {
  486. /* Don't need to check busy - this should have gone. */
  487. __unregister_trace_kprobe(tk);
  488. ret = __register_trace_kprobe(tk);
  489. if (ret)
  490. pr_warning("Failed to re-register probe %s on"
  491. "%s: %d\n",
  492. tk->tp.call.name, mod->name, ret);
  493. }
  494. }
  495. mutex_unlock(&probe_lock);
  496. return NOTIFY_DONE;
  497. }
  498. static struct notifier_block trace_kprobe_module_nb = {
  499. .notifier_call = trace_kprobe_module_callback,
  500. .priority = 1 /* Invoked after kprobe module callback */
  501. };
  502. static int create_trace_kprobe(int argc, char **argv)
  503. {
  504. /*
  505. * Argument syntax:
  506. * - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
  507. * - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
  508. * Fetch args:
  509. * $retval : fetch return value
  510. * $stack : fetch stack address
  511. * $stackN : fetch Nth of stack (N:0-)
  512. * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
  513. * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
  514. * %REG : fetch register REG
  515. * Dereferencing memory fetch:
  516. * +|-offs(ARG) : fetch memory at ARG +|- offs address.
  517. * Alias name of args:
  518. * NAME=FETCHARG : set NAME as alias of FETCHARG.
  519. * Type of args:
  520. * FETCHARG:TYPE : use TYPE instead of unsigned long.
  521. */
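  /*
   * Illustrative usage of the syntax above (a sketch following
   * Documentation/trace/kprobetrace.txt; the probe names, symbol and
   * x86 register names are examples only, not part of this file):
   *
   *   echo 'p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)' \
   *       > /sys/kernel/debug/tracing/kprobe_events
   *   echo 'r:myretprobe do_sys_open $retval' \
   *       >> /sys/kernel/debug/tracing/kprobe_events
   *   echo '-:myprobe' >> /sys/kernel/debug/tracing/kprobe_events
   */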
  522. struct trace_kprobe *tk;
  523. int i, ret = 0;
  524. bool is_return = false, is_delete = false;
  525. char *symbol = NULL, *event = NULL, *group = NULL;
  526. char *arg;
  527. unsigned long offset = 0;
  528. void *addr = NULL;
  529. char buf[MAX_EVENT_NAME_LEN];
  530. /* argc must be >= 1 */
  531. if (argv[0][0] == 'p')
  532. is_return = false;
  533. else if (argv[0][0] == 'r')
  534. is_return = true;
  535. else if (argv[0][0] == '-')
  536. is_delete = true;
  537. else {
  538. pr_info("Probe definition must be started with 'p', 'r' or"
  539. " '-'.\n");
  540. return -EINVAL;
  541. }
  542. if (argv[0][1] == ':') {
  543. event = &argv[0][2];
  544. if (strchr(event, '/')) {
  545. group = event;
  546. event = strchr(group, '/') + 1;
  547. event[-1] = '\0';
  548. if (strlen(group) == 0) {
  549. pr_info("Group name is not specified\n");
  550. return -EINVAL;
  551. }
  552. }
  553. if (strlen(event) == 0) {
  554. pr_info("Event name is not specified\n");
  555. return -EINVAL;
  556. }
  557. }
  558. if (!group)
  559. group = KPROBE_EVENT_SYSTEM;
  560. if (is_delete) {
  561. if (!event) {
  562. pr_info("Delete command needs an event name.\n");
  563. return -EINVAL;
  564. }
  565. mutex_lock(&probe_lock);
  566. tk = find_trace_kprobe(event, group);
  567. if (!tk) {
  568. mutex_unlock(&probe_lock);
  569. pr_info("Event %s/%s doesn't exist.\n", group, event);
  570. return -ENOENT;
  571. }
  572. /* delete an event */
  573. ret = unregister_trace_kprobe(tk);
  574. if (ret == 0)
  575. free_trace_kprobe(tk);
  576. mutex_unlock(&probe_lock);
  577. return ret;
  578. }
  579. if (argc < 2) {
  580. pr_info("Probe point is not specified.\n");
  581. return -EINVAL;
  582. }
  583. if (isdigit(argv[1][0])) {
  584. if (is_return) {
  585. pr_info("Return probe point must be a symbol.\n");
  586. return -EINVAL;
  587. }
  588. /* an address specified */
  589. ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
  590. if (ret) {
  591. pr_info("Failed to parse address.\n");
  592. return ret;
  593. }
  594. } else {
  595. /* a symbol specified */
  596. symbol = argv[1];
  597. /* TODO: support .init module functions */
  598. ret = traceprobe_split_symbol_offset(symbol, &offset);
  599. if (ret) {
  600. pr_info("Failed to parse symbol.\n");
  601. return ret;
  602. }
  603. if (offset && is_return) {
  604. pr_info("Return probe must be used without offset.\n");
  605. return -EINVAL;
  606. }
  607. }
  608. argc -= 2; argv += 2;
  609. /* setup a probe */
  610. if (!event) {
  611. /* Make a new event name */
  612. if (symbol)
  613. snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
  614. is_return ? 'r' : 'p', symbol, offset);
  615. else
  616. snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
  617. is_return ? 'r' : 'p', addr);
  618. event = buf;
  619. }
  620. tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
  621. is_return);
  622. if (IS_ERR(tk)) {
  623. pr_info("Failed to allocate trace_probe.(%d)\n",
  624. (int)PTR_ERR(tk));
  625. return PTR_ERR(tk);
  626. }
  627. /* parse arguments */
  628. ret = 0;
  629. for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
  630. struct probe_arg *parg = &tk->tp.args[i];
  631. /* Increment count for freeing args in error case */
  632. tk->tp.nr_args++;
  633. /* Parse argument name */
  634. arg = strchr(argv[i], '=');
  635. if (arg) {
  636. *arg++ = '\0';
  637. parg->name = kstrdup(argv[i], GFP_KERNEL);
  638. } else {
  639. arg = argv[i];
  640. /* If argument name is omitted, set "argN" */
  641. snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
  642. parg->name = kstrdup(buf, GFP_KERNEL);
  643. }
  644. if (!parg->name) {
  645. pr_info("Failed to allocate argument[%d] name.\n", i);
  646. ret = -ENOMEM;
  647. goto error;
  648. }
  649. if (!is_good_name(parg->name)) {
  650. pr_info("Invalid argument[%d] name: %s\n",
  651. i, parg->name);
  652. ret = -EINVAL;
  653. goto error;
  654. }
  655. if (traceprobe_conflict_field_name(parg->name,
  656. tk->tp.args, i)) {
  657. pr_info("Argument[%d] name '%s' conflicts with "
  658. "another field.\n", i, argv[i]);
  659. ret = -EINVAL;
  660. goto error;
  661. }
  662. /* Parse fetch argument */
  663. ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
  664. is_return, true);
  665. if (ret) {
  666. pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
  667. goto error;
  668. }
  669. }
  670. ret = register_trace_kprobe(tk);
  671. if (ret)
  672. goto error;
  673. return 0;
  674. error:
  675. free_trace_kprobe(tk);
  676. return ret;
  677. }
  678. static int release_all_trace_kprobes(void)
  679. {
  680. struct trace_kprobe *tk;
  681. int ret = 0;
  682. mutex_lock(&probe_lock);
  683. /* Ensure no probe is in use. */
  684. list_for_each_entry(tk, &probe_list, list)
  685. if (trace_probe_is_enabled(&tk->tp)) {
  686. ret = -EBUSY;
  687. goto end;
  688. }
  689. /* TODO: Use batch unregistration */
  690. while (!list_empty(&probe_list)) {
  691. tk = list_entry(probe_list.next, struct trace_kprobe, list);
  692. ret = unregister_trace_kprobe(tk);
  693. if (ret)
  694. goto end;
  695. free_trace_kprobe(tk);
  696. }
  697. end:
  698. mutex_unlock(&probe_lock);
  699. return ret;
  700. }
  701. /* Probes listing interfaces */
  702. static void *probes_seq_start(struct seq_file *m, loff_t *pos)
  703. {
  704. mutex_lock(&probe_lock);
  705. return seq_list_start(&probe_list, *pos);
  706. }
  707. static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
  708. {
  709. return seq_list_next(v, &probe_list, pos);
  710. }
  711. static void probes_seq_stop(struct seq_file *m, void *v)
  712. {
  713. mutex_unlock(&probe_lock);
  714. }
  715. static int probes_seq_show(struct seq_file *m, void *v)
  716. {
  717. struct trace_kprobe *tk = v;
  718. int i;
  719. seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p');
  720. seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name);
  721. if (!tk->symbol)
  722. seq_printf(m, " 0x%p", tk->rp.kp.addr);
  723. else if (tk->rp.kp.offset)
  724. seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
  725. tk->rp.kp.offset);
  726. else
  727. seq_printf(m, " %s", trace_kprobe_symbol(tk));
  728. for (i = 0; i < tk->tp.nr_args; i++)
  729. seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
  730. seq_printf(m, "\n");
  731. return 0;
  732. }
  733. static const struct seq_operations probes_seq_op = {
  734. .start = probes_seq_start,
  735. .next = probes_seq_next,
  736. .stop = probes_seq_stop,
  737. .show = probes_seq_show
  738. };
  739. static int probes_open(struct inode *inode, struct file *file)
  740. {
  741. int ret;
  742. if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
  743. ret = release_all_trace_kprobes();
  744. if (ret < 0)
  745. return ret;
  746. }
  747. return seq_open(file, &probes_seq_op);
  748. }
  749. static ssize_t probes_write(struct file *file, const char __user *buffer,
  750. size_t count, loff_t *ppos)
  751. {
  752. return traceprobe_probes_write(file, buffer, count, ppos,
  753. create_trace_kprobe);
  754. }
  755. static const struct file_operations kprobe_events_ops = {
  756. .owner = THIS_MODULE,
  757. .open = probes_open,
  758. .read = seq_read,
  759. .llseek = seq_lseek,
  760. .release = seq_release,
  761. .write = probes_write,
  762. };
  763. /* Probes profiling interfaces */
  764. static int probes_profile_seq_show(struct seq_file *m, void *v)
  765. {
  766. struct trace_kprobe *tk = v;
  767. seq_printf(m, " %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit,
  768. tk->rp.kp.nmissed);
  769. return 0;
  770. }
  771. static const struct seq_operations profile_seq_op = {
  772. .start = probes_seq_start,
  773. .next = probes_seq_next,
  774. .stop = probes_seq_stop,
  775. .show = probes_profile_seq_show
  776. };
  777. static int profile_open(struct inode *inode, struct file *file)
  778. {
  779. return seq_open(file, &profile_seq_op);
  780. }
  781. static const struct file_operations kprobe_profile_ops = {
  782. .owner = THIS_MODULE,
  783. .open = profile_open,
  784. .read = seq_read,
  785. .llseek = seq_lseek,
  786. .release = seq_release,
  787. };
  788. /* Kprobe handler */
  789. static __kprobes void
  790. __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
  791. struct ftrace_event_file *ftrace_file)
  792. {
  793. struct kprobe_trace_entry_head *entry;
  794. struct ring_buffer_event *event;
  795. struct ring_buffer *buffer;
  796. int size, dsize, pc;
  797. unsigned long irq_flags;
  798. struct ftrace_event_call *call = &tk->tp.call;
  799. WARN_ON(call != ftrace_file->event_call);
  800. if (ftrace_trigger_soft_disabled(ftrace_file))
  801. return;
  802. local_save_flags(irq_flags);
  803. pc = preempt_count();
  804. dsize = __get_data_size(&tk->tp, regs);
  805. size = sizeof(*entry) + tk->tp.size + dsize;
  806. event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
  807. call->event.type,
  808. size, irq_flags, pc);
  809. if (!event)
  810. return;
  811. entry = ring_buffer_event_data(event);
  812. entry->ip = (unsigned long)tk->rp.kp.addr;
  813. store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
  814. event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
  815. entry, irq_flags, pc, regs);
  816. }
  817. static __kprobes void
  818. kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
  819. {
  820. struct event_file_link *link;
  821. list_for_each_entry_rcu(link, &tk->tp.files, list)
  822. __kprobe_trace_func(tk, regs, link->file);
  823. }
  824. /* Kretprobe handler */
  825. static __kprobes void
  826. __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
  827. struct pt_regs *regs,
  828. struct ftrace_event_file *ftrace_file)
  829. {
  830. struct kretprobe_trace_entry_head *entry;
  831. struct ring_buffer_event *event;
  832. struct ring_buffer *buffer;
  833. int size, pc, dsize;
  834. unsigned long irq_flags;
  835. struct ftrace_event_call *call = &tk->tp.call;
  836. WARN_ON(call != ftrace_file->event_call);
  837. if (ftrace_trigger_soft_disabled(ftrace_file))
  838. return;
  839. local_save_flags(irq_flags);
  840. pc = preempt_count();
  841. dsize = __get_data_size(&tk->tp, regs);
  842. size = sizeof(*entry) + tk->tp.size + dsize;
  843. event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
  844. call->event.type,
  845. size, irq_flags, pc);
  846. if (!event)
  847. return;
  848. entry = ring_buffer_event_data(event);
  849. entry->func = (unsigned long)tk->rp.kp.addr;
  850. entry->ret_ip = (unsigned long)ri->ret_addr;
  851. store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
  852. event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
  853. entry, irq_flags, pc, regs);
  854. }
  855. static __kprobes void
  856. kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
  857. struct pt_regs *regs)
  858. {
  859. struct event_file_link *link;
  860. list_for_each_entry_rcu(link, &tk->tp.files, list)
  861. __kretprobe_trace_func(tk, ri, regs, link->file);
  862. }
  863. /* Event entry printers */
  864. static enum print_line_t
  865. print_kprobe_event(struct trace_iterator *iter, int flags,
  866. struct trace_event *event)
  867. {
  868. struct kprobe_trace_entry_head *field;
  869. struct trace_seq *s = &iter->seq;
  870. struct trace_probe *tp;
  871. u8 *data;
  872. int i;
  873. field = (struct kprobe_trace_entry_head *)iter->ent;
  874. tp = container_of(event, struct trace_probe, call.event);
  875. if (!trace_seq_printf(s, "%s: (", tp->call.name))
  876. goto partial;
  877. if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
  878. goto partial;
  879. if (!trace_seq_puts(s, ")"))
  880. goto partial;
  881. data = (u8 *)&field[1];
  882. for (i = 0; i < tp->nr_args; i++)
  883. if (!tp->args[i].type->print(s, tp->args[i].name,
  884. data + tp->args[i].offset, field))
  885. goto partial;
  886. if (!trace_seq_puts(s, "\n"))
  887. goto partial;
  888. return TRACE_TYPE_HANDLED;
  889. partial:
  890. return TRACE_TYPE_PARTIAL_LINE;
  891. }
  892. static enum print_line_t
  893. print_kretprobe_event(struct trace_iterator *iter, int flags,
  894. struct trace_event *event)
  895. {
  896. struct kretprobe_trace_entry_head *field;
  897. struct trace_seq *s = &iter->seq;
  898. struct trace_probe *tp;
  899. u8 *data;
  900. int i;
  901. field = (struct kretprobe_trace_entry_head *)iter->ent;
  902. tp = container_of(event, struct trace_probe, call.event);
  903. if (!trace_seq_printf(s, "%s: (", tp->call.name))
  904. goto partial;
  905. if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
  906. goto partial;
  907. if (!trace_seq_puts(s, " <- "))
  908. goto partial;
  909. if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
  910. goto partial;
  911. if (!trace_seq_puts(s, ")"))
  912. goto partial;
  913. data = (u8 *)&field[1];
  914. for (i = 0; i < tp->nr_args; i++)
  915. if (!tp->args[i].type->print(s, tp->args[i].name,
  916. data + tp->args[i].offset, field))
  917. goto partial;
  918. if (!trace_seq_puts(s, "\n"))
  919. goto partial;
  920. return TRACE_TYPE_HANDLED;
  921. partial:
  922. return TRACE_TYPE_PARTIAL_LINE;
  923. }
  924. static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
  925. {
  926. int ret, i;
  927. struct kprobe_trace_entry_head field;
  928. struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
  929. DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
  930. /* Set argument names as fields */
  931. for (i = 0; i < tk->tp.nr_args; i++) {
  932. struct probe_arg *parg = &tk->tp.args[i];
  933. ret = trace_define_field(event_call, parg->type->fmttype,
  934. parg->name,
  935. sizeof(field) + parg->offset,
  936. parg->type->size,
  937. parg->type->is_signed,
  938. FILTER_OTHER);
  939. if (ret)
  940. return ret;
  941. }
  942. return 0;
  943. }
  944. static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
  945. {
  946. int ret, i;
  947. struct kretprobe_trace_entry_head field;
  948. struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
  949. DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
  950. DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
  951. /* Set argument names as fields */
  952. for (i = 0; i < tk->tp.nr_args; i++) {
  953. struct probe_arg *parg = &tk->tp.args[i];
  954. ret = trace_define_field(event_call, parg->type->fmttype,
  955. parg->name,
  956. sizeof(field) + parg->offset,
  957. parg->type->size,
  958. parg->type->is_signed,
  959. FILTER_OTHER);
  960. if (ret)
  961. return ret;
  962. }
  963. return 0;
  964. }
  965. #ifdef CONFIG_PERF_EVENTS
  966. /* Kprobe profile handler */
  967. static __kprobes void
  968. kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
  969. {
  970. struct ftrace_event_call *call = &tk->tp.call;
  971. struct kprobe_trace_entry_head *entry;
  972. struct hlist_head *head;
  973. int size, __size, dsize;
  974. int rctx;
  975. head = this_cpu_ptr(call->perf_events);
  976. if (hlist_empty(head))
  977. return;
  978. dsize = __get_data_size(&tk->tp, regs);
  979. __size = sizeof(*entry) + tk->tp.size + dsize;
  980. size = ALIGN(__size + sizeof(u32), sizeof(u64));
  981. size -= sizeof(u32);
  982. entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
  983. if (!entry)
  984. return;
  985. entry->ip = (unsigned long)tk->rp.kp.addr;
  986. memset(&entry[1], 0, dsize);
  987. store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
  988. perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
  989. }
  990. /* Kretprobe profile handler */
  991. static __kprobes void
  992. kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
  993. struct pt_regs *regs)
  994. {
  995. struct ftrace_event_call *call = &tk->tp.call;
  996. struct kretprobe_trace_entry_head *entry;
  997. struct hlist_head *head;
  998. int size, __size, dsize;
  999. int rctx;
  1000. head = this_cpu_ptr(call->perf_events);
  1001. if (hlist_empty(head))
  1002. return;
  1003. dsize = __get_data_size(&tk->tp, regs);
  1004. __size = sizeof(*entry) + tk->tp.size + dsize;
  1005. size = ALIGN(__size + sizeof(u32), sizeof(u64));
  1006. size -= sizeof(u32);
  1007. entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
  1008. if (!entry)
  1009. return;
  1010. entry->func = (unsigned long)tk->rp.kp.addr;
  1011. entry->ret_ip = (unsigned long)ri->ret_addr;
  1012. store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
  1013. perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
  1014. }
  1015. #endif /* CONFIG_PERF_EVENTS */
  1016. /*
  1017. * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
  1018. *
  1019. * kprobe_trace_self_tests_init() does enable_trace_kprobe/disable_trace_kprobe
  1020. * lockless, but we can't race with this __init function.
  1021. */
  1022. static __kprobes
  1023. int kprobe_register(struct ftrace_event_call *event,
  1024. enum trace_reg type, void *data)
  1025. {
  1026. struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
  1027. struct ftrace_event_file *file = data;
  1028. switch (type) {
  1029. case TRACE_REG_REGISTER:
  1030. return enable_trace_kprobe(tk, file);
  1031. case TRACE_REG_UNREGISTER:
  1032. return disable_trace_kprobe(tk, file);
  1033. #ifdef CONFIG_PERF_EVENTS
  1034. case TRACE_REG_PERF_REGISTER:
  1035. return enable_trace_kprobe(tk, NULL);
  1036. case TRACE_REG_PERF_UNREGISTER:
  1037. return disable_trace_kprobe(tk, NULL);
  1038. case TRACE_REG_PERF_OPEN:
  1039. case TRACE_REG_PERF_CLOSE:
  1040. case TRACE_REG_PERF_ADD:
  1041. case TRACE_REG_PERF_DEL:
  1042. return 0;
  1043. #endif
  1044. }
  1045. return 0;
  1046. }
  1047. static __kprobes
  1048. int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
  1049. {
  1050. struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
  1051. tk->nhit++;
  1052. if (tk->tp.flags & TP_FLAG_TRACE)
  1053. kprobe_trace_func(tk, regs);
  1054. #ifdef CONFIG_PERF_EVENTS
  1055. if (tk->tp.flags & TP_FLAG_PROFILE)
  1056. kprobe_perf_func(tk, regs);
  1057. #endif
  1058. return 0; /* We don't tweak the kernel, so just return 0 */
  1059. }
  1060. static __kprobes
  1061. int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
  1062. {
  1063. struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
  1064. tk->nhit++;
  1065. if (tk->tp.flags & TP_FLAG_TRACE)
  1066. kretprobe_trace_func(tk, ri, regs);
  1067. #ifdef CONFIG_PERF_EVENTS
  1068. if (tk->tp.flags & TP_FLAG_PROFILE)
  1069. kretprobe_perf_func(tk, ri, regs);
  1070. #endif
  1071. return 0; /* We don't tweak the kernel, so just return 0 */
  1072. }
  1073. static struct trace_event_functions kretprobe_funcs = {
  1074. .trace = print_kretprobe_event
  1075. };
  1076. static struct trace_event_functions kprobe_funcs = {
  1077. .trace = print_kprobe_event
  1078. };
  1079. static int register_kprobe_event(struct trace_kprobe *tk)
  1080. {
  1081. struct ftrace_event_call *call = &tk->tp.call;
  1082. int ret;
  1083. /* Initialize ftrace_event_call */
  1084. INIT_LIST_HEAD(&call->class->fields);
  1085. if (trace_kprobe_is_return(tk)) {
  1086. call->event.funcs = &kretprobe_funcs;
  1087. call->class->define_fields = kretprobe_event_define_fields;
  1088. } else {
  1089. call->event.funcs = &kprobe_funcs;
  1090. call->class->define_fields = kprobe_event_define_fields;
  1091. }
  1092. if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
  1093. return -ENOMEM;
  1094. ret = register_ftrace_event(&call->event);
  1095. if (!ret) {
  1096. kfree(call->print_fmt);
  1097. return -ENODEV;
  1098. }
  1099. call->flags = 0;
  1100. call->class->reg = kprobe_register;
  1101. call->data = tk;
  1102. ret = trace_add_event_call(call);
  1103. if (ret) {
  1104. pr_info("Failed to register kprobe event: %s\n", call->name);
  1105. kfree(call->print_fmt);
  1106. unregister_ftrace_event(&call->event);
  1107. }
  1108. return ret;
  1109. }
  1110. static int unregister_kprobe_event(struct trace_kprobe *tk)
  1111. {
  1112. int ret;
  1113. /* tp->event is unregistered in trace_remove_event_call() */
  1114. ret = trace_remove_event_call(&tk->tp.call);
  1115. if (!ret)
  1116. kfree(tk->tp.call.print_fmt);
  1117. return ret;
  1118. }
  1119. /* Make a debugfs interface for controlling probe points */
  1120. static __init int init_kprobe_trace(void)
  1121. {
  1122. struct dentry *d_tracer;
  1123. struct dentry *entry;
  1124. if (register_module_notifier(&trace_kprobe_module_nb))
  1125. return -EINVAL;
  1126. d_tracer = tracing_init_dentry();
  1127. if (!d_tracer)
  1128. return 0;
  1129. entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
  1130. NULL, &kprobe_events_ops);
  1131. /* Event list interface */
  1132. if (!entry)
  1133. pr_warning("Could not create debugfs "
  1134. "'kprobe_events' entry\n");
  1135. /* Profile interface */
  1136. entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
  1137. NULL, &kprobe_profile_ops);
  1138. if (!entry)
  1139. pr_warning("Could not create debugfs "
  1140. "'kprobe_profile' entry\n");
  1141. return 0;
  1142. }
  1143. fs_initcall(init_kprobe_trace);
  1144. #ifdef CONFIG_FTRACE_STARTUP_TEST
  1145. /*
  1146. * The "__used" keeps gcc from removing the function symbol
  1147. * from the kallsyms table.
  1148. */
  1149. static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
  1150. int a4, int a5, int a6)
  1151. {
  1152. return a1 + a2 + a3 + a4 + a5 + a6;
  1153. }
  1154. static struct ftrace_event_file *
  1155. find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
  1156. {
  1157. struct ftrace_event_file *file;
  1158. list_for_each_entry(file, &tr->events, list)
  1159. if (file->event_call == &tk->tp.call)
  1160. return file;
  1161. return NULL;
  1162. }
  1163. /*
  1164. * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
  1165. * stage, so we can do this lockless.
  1166. */
  1167. static __init int kprobe_trace_self_tests_init(void)
  1168. {
  1169. int ret, warn = 0;
  1170. int (*target)(int, int, int, int, int, int);
  1171. struct trace_kprobe *tk;
  1172. struct ftrace_event_file *file;
  1173. target = kprobe_trace_selftest_target;
  1174. pr_info("Testing kprobe tracing: ");
  1175. ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
  1176. "$stack $stack0 +0($stack)",
  1177. create_trace_kprobe);
  1178. if (WARN_ON_ONCE(ret)) {
  1179. pr_warn("error on probing function entry.\n");
  1180. warn++;
  1181. } else {
  1182. /* Enable trace point */
  1183. tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
  1184. if (WARN_ON_ONCE(tk == NULL)) {
  1185. pr_warn("error on getting new probe.\n");
  1186. warn++;
  1187. } else {
  1188. file = find_trace_probe_file(tk, top_trace_array());
  1189. if (WARN_ON_ONCE(file == NULL)) {
  1190. pr_warn("error on getting probe file.\n");
  1191. warn++;
  1192. } else
  1193. enable_trace_kprobe(tk, file);
  1194. }
  1195. }
  1196. ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
  1197. "$retval", create_trace_kprobe);
  1198. if (WARN_ON_ONCE(ret)) {
  1199. pr_warn("error on probing function return.\n");
  1200. warn++;
  1201. } else {
  1202. /* Enable trace point */
  1203. tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
  1204. if (WARN_ON_ONCE(tk == NULL)) {
  1205. pr_warn("error on getting 2nd new probe.\n");
  1206. warn++;
  1207. } else {
  1208. file = find_trace_probe_file(tk, top_trace_array());
  1209. if (WARN_ON_ONCE(file == NULL)) {
  1210. pr_warn("error on getting probe file.\n");
  1211. warn++;
  1212. } else
  1213. enable_trace_kprobe(tk, file);
  1214. }
  1215. }
  1216. if (warn)
  1217. goto end;
  1218. ret = target(1, 2, 3, 4, 5, 6);
  1219. /* Disable trace points before removing them */
  1220. tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
  1221. if (WARN_ON_ONCE(tk == NULL)) {
  1222. pr_warn("error on getting test probe.\n");
  1223. warn++;
  1224. } else {
  1225. file = find_trace_probe_file(tk, top_trace_array());
  1226. if (WARN_ON_ONCE(file == NULL)) {
  1227. pr_warn("error on getting probe file.\n");
  1228. warn++;
  1229. } else
  1230. disable_trace_kprobe(tk, file);
  1231. }
  1232. tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
  1233. if (WARN_ON_ONCE(tk == NULL)) {
  1234. pr_warn("error on getting 2nd test probe.\n");
  1235. warn++;
  1236. } else {
  1237. file = find_trace_probe_file(tk, top_trace_array());
  1238. if (WARN_ON_ONCE(file == NULL)) {
  1239. pr_warn("error on getting probe file.\n");
  1240. warn++;
  1241. } else
  1242. disable_trace_kprobe(tk, file);
  1243. }
  1244. ret = traceprobe_command("-:testprobe", create_trace_kprobe);
  1245. if (WARN_ON_ONCE(ret)) {
  1246. pr_warn("error on deleting a probe.\n");
  1247. warn++;
  1248. }
  1249. ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
  1250. if (WARN_ON_ONCE(ret)) {
  1251. pr_warn("error on deleting a probe.\n");
  1252. warn++;
  1253. }
  1254. end:
  1255. release_all_trace_kprobes();
  1256. if (warn)
  1257. pr_cont("NG: Some tests are failed. Please check them.\n");
  1258. else
  1259. pr_cont("OK\n");
  1260. return 0;
  1261. }
  1262. late_initcall(kprobe_trace_self_tests_init);
  1263. #endif