trace_kprobe.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Kprobes-based tracing events
  4. *
  5. * Created by Masami Hiramatsu <mhiramat@redhat.com>
  6. *
  7. */
  8. #define pr_fmt(fmt) "trace_kprobe: " fmt
  9. #include <linux/module.h>
  10. #include <linux/uaccess.h>
  11. #include <linux/rculist.h>
  12. #include <linux/error-injection.h>
  13. #include "trace_kprobe_selftest.h"
  14. #include "trace_probe.h"
  15. #include "trace_probe_tmpl.h"
  16. #define KPROBE_EVENT_SYSTEM "kprobes"
  17. #define KRETPROBE_MAXACTIVE_MAX 4096
/*
 * Kprobe event core functions
 */
/*
 * Runtime representation of one kprobe/kretprobe trace event.
 */
struct trace_kprobe {
	struct list_head list;		/* link in probe_list */
	struct kretprobe rp;		/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;	/* per-cpu hit counter */
	const char *symbol;		/* symbol name */
	struct trace_probe tp;		/* must be last: tp.args is sized below */
};

/* Allocation size of a trace_kprobe carrying n probe arguments. */
#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
  31. static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
  32. {
  33. return tk->rp.handler != NULL;
  34. }
  35. static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
  36. {
  37. return tk->symbol ? tk->symbol : "unknown";
  38. }
/* Offset from the probed symbol at which the kprobe is placed. */
static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}
  43. static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
  44. {
  45. return !!(kprobe_gone(&tk->rp.kp));
  46. }
  47. static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
  48. struct module *mod)
  49. {
  50. int len = strlen(mod->name);
  51. const char *name = trace_kprobe_symbol(tk);
  52. return strncmp(mod->name, name, len) == 0 && name[len] == ':';
  53. }
/*
 * For a "MOD:SYM" probe, check whether module MOD is currently loaded.
 * Probes without a module prefix trivially "exist".
 *
 * The symbol string is temporarily NUL-terminated at the ':' so the module
 * part can be passed to find_module(), then restored before returning.
 */
static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
{
	char *p;
	bool ret;

	if (!tk->symbol)
		return false;
	p = strchr(tk->symbol, ':');
	if (!p)
		return true;
	*p = '\0';
	mutex_lock(&module_mutex);
	ret = !!find_module(tk->symbol);
	mutex_unlock(&module_mutex);
	*p = ':';

	return ret;
}
  70. static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
  71. {
  72. unsigned long nhit = 0;
  73. int cpu;
  74. for_each_possible_cpu(cpu)
  75. nhit += *per_cpu_ptr(tk->nhit, cpu);
  76. return nhit;
  77. }
  78. /* Return 0 if it fails to find the symbol address */
  79. static nokprobe_inline
  80. unsigned long trace_kprobe_address(struct trace_kprobe *tk)
  81. {
  82. unsigned long addr;
  83. if (tk->symbol) {
  84. addr = (unsigned long)
  85. kallsyms_lookup_name(trace_kprobe_symbol(tk));
  86. if (addr)
  87. addr += tk->rp.kp.offset;
  88. } else {
  89. addr = (unsigned long)tk->rp.kp.addr;
  90. }
  91. return addr;
  92. }
/* True if this probe is placed on the entry point of its target function. */
bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

	/* Address-based probes pass addr; symbol-based pass name + offset. */
	return kprobe_on_func_entry(tk->rp.kp.addr,
			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
			tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
}
/* True if the probed address is on the kernel's error-injection list. */
bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

	return within_error_injection_list(trace_kprobe_address(tk));
}
static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

/* Serializes all updates to probe_list and probe (un)registration. */
static DEFINE_MUTEX(probe_lock);
/* All registered trace_kprobes (both kprobe and kretprobe events). */
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 *
 * Returns the new trace_kprobe or an ERR_PTR (-ENOMEM on allocation
 * failure, -EINVAL on a bad event/group name).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int maxactive,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		/* Symbol-based probe: kprobes resolves symbol_name+offset. */
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	/* A kretprobe is distinguished by rp.handler being set. */
	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;
	tk->rp.maxactive = maxactive;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}
	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	/* kfree()/free_percpu() accept NULL, so a partial init is fine here. */
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}
/* Release a trace_kprobe and everything it owns (args, names, counters). */
static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}
/* Look up a probe by event and group name; caller must hold probe_lock. */
static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}
  190. static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
  191. {
  192. int ret = 0;
  193. if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
  194. if (trace_kprobe_is_return(tk))
  195. ret = enable_kretprobe(&tk->rp);
  196. else
  197. ret = enable_kprobe(&tk->rp.kp);
  198. }
  199. return ret;
  200. }
/*
 * Enable trace_probe
 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link;
	int ret = 0;

	if (file) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
		ret = __enable_trace_kprobe(tk);
		if (ret) {
			/*
			 * Roll back link and flag on failure.
			 * NOTE(review): link is kfree()d right after
			 * list_del_rcu() without a grace period -- confirm no
			 * RCU reader can observe it, given the probe was never
			 * successfully armed here.
			 */
			list_del_rcu(&link->list);
			kfree(link);
			tk->tp.flags &= ~TP_FLAG_TRACE;
		}
	} else {
		tk->tp.flags |= TP_FLAG_PROFILE;
		ret = __enable_trace_kprobe(tk);
		if (ret)
			tk->tp.flags &= ~TP_FLAG_PROFILE;
	}
 out:
	return ret;
}
/*
 * Disable trace_probe
 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		/* Keep TP_FLAG_TRACE while other files still use the event. */
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	/* Disarm the k*probe only once both trace and perf users are gone. */
	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}

	/*
	 * if tk is not added to any list, it must be a local trace_kprobe
	 * created with perf_event_open. We don't need to wait for these
	 * trace_kprobes
	 */
	if (list_empty(&tk->list))
		wait = 0;
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure disabled (all running handlers are finished).
		 * This is not only for kfree(), but also the caller,
		 * trace_remove_event_call() supposes it for releasing
		 * event_call related objects, which will be accessed in
		 * the kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}
#if defined(CONFIG_KPROBES_ON_FTRACE) && \
	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
/* True if the probed address lies in a function ftrace does not instrument. */
static bool within_notrace_func(struct trace_kprobe *tk)
{
	unsigned long offset, size, addr;

	addr = trace_kprobe_address(tk);
	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
		return false;

	/* Get the entry address of the target function */
	addr -= offset;

	/*
	 * Since ftrace_location_range() does inclusive range check, we need
	 * to subtract 1 byte from the end address.
	 */
	return !ftrace_location_range(addr, addr + size - 1);
}
#else
#define within_notrace_func(tk)	(false)
#endif
/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	if (within_notrace_func(tk)) {
		pr_warn("Could not probe notrace function %s\n",
			trace_kprobe_symbol(tk));
		return -EINVAL;
	}

	/* Re-resolve symbol-dependent fetch args before arming the probe. */
	for (i = 0; i < tk->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tk->tp.args[i]);
		if (ret)
			return ret;
	}

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0) {
		tk->tp.flags |= TP_FLAG_REGISTERED;
	} else if (ret == -EILSEQ) {
		pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
			tk->rp.kp.addr);
		ret = -EINVAL;
	}
	return ret;
}
/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse: re-resolve symbol on next register */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}
/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}
/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete old (same name) event if exist */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
				   tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		/* Target module not loaded yet: keep the event, arm later. */
		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
		ret = 0;
	}

	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}
/* Module notifier call back, checking event on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}
static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};
  430. /* Convert certain expected symbols into '_' when generating event names */
  431. static inline void sanitize_event_name(char *name)
  432. {
  433. while (*name++ != '\0')
  434. if (*name == ':' || *name == '.')
  435. *name = '_';
  436. }
/* Parse one "kprobe_events" command line and create/delete a probe event. */
static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm       : fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	int maxactive = 0;
	char *arg;
	long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];
	unsigned int flags = TPARG_FL_KERNEL;

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r') {
		is_return = true;
		flags |= TPARG_FL_RETURN;
	} else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	/* Split off the optional ":[GRP/]EVENT" suffix of the command word. */
	event = strchr(&argv[0][1], ':');
	if (event) {
		event[0] = '\0';
		event++;
	}
	if (is_return && isdigit(argv[0][1])) {
		/*
		 * NOTE(review): maxactive is int but kstrtouint() takes an
		 * unsigned int * -- matches historical usage; confirm no
		 * sign issue since values above KRETPROBE_MAXACTIVE_MAX are
		 * rejected below.
		 */
		ret = kstrtouint(&argv[0][1], 0, &maxactive);
		if (ret) {
			pr_info("Failed to parse maxactive.\n");
			return ret;
		}
		/* kretprobes instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			pr_info("Maxactive is too big (%d > %d).\n",
				maxactive, KRETPROBE_MAXACTIVE_MAX);
			return -E2BIG;
		}
	}

	if (event) {
		char *slash;

		slash = strchr(event, '/');
		if (slash) {
			group = event;
			event = slash + 1;
			slash[0] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}

	/* try to parse an address. if that fails, try to read the
	 * input as a symbol. */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret || offset < 0 || offset > UINT_MAX) {
			pr_info("Failed to parse either an address or a symbol.\n");
			return ret;
		}
		if (kprobe_on_func_entry(NULL, symbol, offset))
			flags |= TPARG_FL_FENTRY;
		/* A return probe must be placed on the function entry. */
		if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
			pr_info("Given offset is not valid for return probe.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
			       argc, is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}
		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
						   tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						 flags);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}
/* Remove and free every probe on probe_list; -EBUSY if any is enabled. */
static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}
end:
	mutex_unlock(&probe_lock);
	return ret;
}
/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	/* Held until probes_seq_stop(); protects probe_list while iterating. */
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}
static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	/* Advance to the next probe on probe_list. */
	return seq_list_next(v, &probe_list, pos);
}
static void probes_seq_stop(struct seq_file *m, void *v)
{
	/* Pairs with the mutex_lock() taken in probes_seq_start(). */
	mutex_unlock(&probe_lock);
}
/* Print one probe definition in the same syntax used to create it. */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
		   trace_event_name(&tk->tp.call));

	/* Probe point: raw address, symbol+offset, or bare symbol. */
	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}
/* seq_file operations for listing probe definitions. */
static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	/* Opening for write with O_TRUNC clears all existing probes first. */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	/* Each written line is parsed as one probe command. */
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_trace_kprobe);
}
/* File operations for the kprobe_events control file. */
static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	/* event name, total hits across CPUs, missed kretprobe instances */
	seq_printf(m, " %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}
/* seq_file operations for per-probe hit/miss statistics. */
static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};
static int profile_open(struct inode *inode, struct file *file)
{
	/* Read-only statistics view; no truncate handling needed here. */
	return seq_open(file, &profile_seq_op);
}
/* File operations for the kprobe_profile statistics file. */
static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
/* Kprobe specific fetch functions */

/* Return the length of string -- including null terminal byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	/* Copy one byte at a time until NUL, fault, or MAX_STRING_SIZE. */
	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	/*
	 * NOTE(review): on architectures where __copy_from_user_inatomic()
	 * reports a fault as a positive "bytes not copied" count (never
	 * negative), a faulting address falls through to "return len", and
	 * 'c' read in the loop condition after a failed copy is
	 * indeterminate (the loop still exits because ret != 0) -- confirm
	 * callers tolerate this.
	 */
	return (ret < 0) ? ret : len;
}
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	int maxlen = get_loc_len(*(u32 *)dest);
	u8 *dst = get_loc_data(dest, base);
	long ret;

	if (unlikely(!maxlen))
		return -ENOMEM;
	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_unsafe(dst, (void *)addr, maxlen);

	/* On success, record actual length and offset in the data location. */
	if (ret >= 0)
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);

	return ret;
}
/* Non-faulting kernel memory read; thin wrapper over probe_kernel_read(). */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	return probe_kernel_read(dest, src, size);
}
/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

retry:
	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = regs_get_kernel_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = kernel_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = (unsigned long)current->comm;
		break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	case FETCH_OP_ARG:
		val = regs_get_kernel_argument(regs, code->param);
		break;
#endif
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		return -EILSEQ;
	}
	code++;

	/* 2nd stage: dereference/convert/store handled by the common code. */
	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
/* Kprobe handler */
/*
 * Record one kprobe hit into the ftrace ring buffer of @trace_file's
 * trace instance: reserve space, fill the fixed header plus the probe
 * argument payload, then commit (honoring event triggers).
 */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	/* Triggers may soft-disable the event without removing the probe. */
	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* Dynamic (string) data size must be known before reserving space. */
	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	/* Argument payload is laid out immediately after the fixed header. */
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}
/* Dispatch a kprobe hit to every trace instance this event is enabled in. */
static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	/* RCU list walk: links are added/removed under event_mutex. */
	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);
/* Kretprobe handler */
/*
 * Record one function-return hit into the ring buffer of @trace_file's
 * trace instance.  Same shape as __kprobe_trace_func(), but the header
 * carries both the probed function address and the return site.
 */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	/* Triggers may soft-disable the event without removing the probe. */
	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* Dynamic (string) data size must be known before reserving space. */
	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	/* Argument payload is laid out immediately after the fixed header. */
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}
/* Dispatch a kretprobe hit to every trace instance this event is enabled in. */
static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	/* RCU list walk: links are added/removed under event_mutex. */
	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);
  911. /* Event entry printers */
  912. static enum print_line_t
  913. print_kprobe_event(struct trace_iterator *iter, int flags,
  914. struct trace_event *event)
  915. {
  916. struct kprobe_trace_entry_head *field;
  917. struct trace_seq *s = &iter->seq;
  918. struct trace_probe *tp;
  919. field = (struct kprobe_trace_entry_head *)iter->ent;
  920. tp = container_of(event, struct trace_probe, call.event);
  921. trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
  922. if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
  923. goto out;
  924. trace_seq_putc(s, ')');
  925. if (print_probe_args(s, tp->args, tp->nr_args,
  926. (u8 *)&field[1], field) < 0)
  927. goto out;
  928. trace_seq_putc(s, '\n');
  929. out:
  930. return trace_handle_return(s);
  931. }
  932. static enum print_line_t
  933. print_kretprobe_event(struct trace_iterator *iter, int flags,
  934. struct trace_event *event)
  935. {
  936. struct kretprobe_trace_entry_head *field;
  937. struct trace_seq *s = &iter->seq;
  938. struct trace_probe *tp;
  939. field = (struct kretprobe_trace_entry_head *)iter->ent;
  940. tp = container_of(event, struct trace_probe, call.event);
  941. trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
  942. if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
  943. goto out;
  944. trace_seq_puts(s, " <- ");
  945. if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
  946. goto out;
  947. trace_seq_putc(s, ')');
  948. if (print_probe_args(s, tp->args, tp->nr_args,
  949. (u8 *)&field[1], field) < 0)
  950. goto out;
  951. trace_seq_putc(s, '\n');
  952. out:
  953. return trace_handle_return(s);
  954. }
/* Describe the kprobe record layout (ip + probe args) to the event core. */
static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret;	/* set and checked inside the DEFINE_FIELD() macro */
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

	/* Probe arguments follow the fixed header. */
	return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
}
/* Describe the kretprobe record layout (func + ret_ip + args) to the core. */
static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret;	/* set and checked inside the DEFINE_FIELD() macro */
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

	/* Probe arguments follow the fixed header. */
	return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
}
  972. #ifdef CONFIG_PERF_EVENTS
/* Kprobe profile handler */
/*
 * Deliver a kprobe hit to perf.  Returns 1 when an attached BPF program
 * changed the pc (caller must then skip single-stepping), 0 otherwise.
 */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call)) {
		unsigned long orig_ip = instruction_pointer(regs);
		int ret;

		ret = trace_call_bpf(call, regs);

		/*
		 * We need to check and see if we modified the pc of the
		 * pt_regs, and if so return 1 so that we don't do the
		 * single stepping.
		 */
		if (orig_ip != instruction_pointer(regs))
			return 1;
		/* BPF filtered the event out: nothing to record. */
		if (!ret)
			return 0;
	}

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	/* Round up so the record plus its u32 size header is u64-aligned. */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return 0;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	/* Zero the dynamic area so no stale buffer bytes leak to userspace. */
	memset(&entry[1], 0, dsize);
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);
/* Kretprobe profile handler */
/* Deliver a function-return hit to perf (after an optional BPF filter). */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	/* A BPF program returning 0 filters this event out. */
	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	/* Round up so the record plus its u32 size header is u64-aligned. */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
  1043. int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
  1044. const char **symbol, u64 *probe_offset,
  1045. u64 *probe_addr, bool perf_type_tracepoint)
  1046. {
  1047. const char *pevent = trace_event_name(event->tp_event);
  1048. const char *group = event->tp_event->class->system;
  1049. struct trace_kprobe *tk;
  1050. if (perf_type_tracepoint)
  1051. tk = find_trace_kprobe(pevent, group);
  1052. else
  1053. tk = event->tp_event->data;
  1054. if (!tk)
  1055. return -EINVAL;
  1056. *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
  1057. : BPF_FD_TYPE_KPROBE;
  1058. if (tk->symbol) {
  1059. *symbol = tk->symbol;
  1060. *probe_offset = tk->rp.kp.offset;
  1061. *probe_addr = 0;
  1062. } else {
  1063. *symbol = NULL;
  1064. *probe_offset = 0;
  1065. *probe_addr = (unsigned long)tk->rp.kp.addr;
  1066. }
  1067. return 0;
  1068. }
  1069. #endif /* CONFIG_PERF_EVENTS */
  1070. /*
  1071. * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
  1072. *
  1073. * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
  1074. * lockless, but we can't race with this __init function.
  1075. */
  1076. static int kprobe_register(struct trace_event_call *event,
  1077. enum trace_reg type, void *data)
  1078. {
  1079. struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
  1080. struct trace_event_file *file = data;
  1081. switch (type) {
  1082. case TRACE_REG_REGISTER:
  1083. return enable_trace_kprobe(tk, file);
  1084. case TRACE_REG_UNREGISTER:
  1085. return disable_trace_kprobe(tk, file);
  1086. #ifdef CONFIG_PERF_EVENTS
  1087. case TRACE_REG_PERF_REGISTER:
  1088. return enable_trace_kprobe(tk, NULL);
  1089. case TRACE_REG_PERF_UNREGISTER:
  1090. return disable_trace_kprobe(tk, NULL);
  1091. case TRACE_REG_PERF_OPEN:
  1092. case TRACE_REG_PERF_CLOSE:
  1093. case TRACE_REG_PERF_ADD:
  1094. case TRACE_REG_PERF_DEL:
  1095. return 0;
  1096. #endif
  1097. }
  1098. return 0;
  1099. }
/*
 * Common kprobe entry point: count the hit, then fan out to the ftrace
 * and/or perf handlers depending on which consumers are enabled.
 */
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
	int ret = 0;

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		ret = kprobe_perf_func(tk, regs);
#endif
	/* Non-zero means a BPF program changed the pc: skip single-stepping. */
	return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);
/*
 * Common kretprobe entry point: count the hit, then fan out to the
 * ftrace and/or perf return handlers.
 */
static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);
/* Output formatting callbacks for kretprobe events. */
static struct trace_event_functions kretprobe_funcs = {
	.trace = print_kretprobe_event
};

/* Output formatting callbacks for kprobe events. */
static struct trace_event_functions kprobe_funcs = {
	.trace = print_kprobe_event
};
  1134. static inline void init_trace_event_call(struct trace_kprobe *tk,
  1135. struct trace_event_call *call)
  1136. {
  1137. INIT_LIST_HEAD(&call->class->fields);
  1138. if (trace_kprobe_is_return(tk)) {
  1139. call->event.funcs = &kretprobe_funcs;
  1140. call->class->define_fields = kretprobe_event_define_fields;
  1141. } else {
  1142. call->event.funcs = &kprobe_funcs;
  1143. call->class->define_fields = kprobe_event_define_fields;
  1144. }
  1145. call->flags = TRACE_EVENT_FL_KPROBE;
  1146. call->class->reg = kprobe_register;
  1147. call->data = tk;
  1148. }
/*
 * Publish @tk as a full trace event (format file, enable knob, etc.).
 * Returns 0 on success or a negative errno; on failure all intermediate
 * allocations/registrations are unwound.
 */
static int register_kprobe_event(struct trace_kprobe *tk)
{
	struct trace_event_call *call = &tk->tp.call;
	int ret = 0;

	init_trace_event_call(tk, call);

	if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
		return -ENOMEM;
	/* register_trace_event() returns the assigned type id, 0 on failure. */
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n",
			trace_event_name(call));
		/* Unwind: free the fmt string and drop the event type. */
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}
	return ret;
}
  1170. static int unregister_kprobe_event(struct trace_kprobe *tk)
  1171. {
  1172. int ret;
  1173. /* tp->event is unregistered in trace_remove_event_call() */
  1174. ret = trace_remove_event_call(&tk->tp.call);
  1175. if (!ret)
  1176. kfree(tk->tp.call.print_fmt);
  1177. return ret;
  1178. }
  1179. #ifdef CONFIG_PERF_EVENTS
/* create a trace_kprobe, but don't add it to global lists */
/*
 * Build a private (perf-local) kprobe event on @func/@addr+@offs.
 * Returns the event call, or an ERR_PTR on failure.  The caller owns the
 * result and must release it with destroy_local_trace_kprobe().
 */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
			  bool is_return)
{
	struct trace_kprobe *tk;
	int ret;
	char *event;

	/*
	 * local trace_kprobes are not added to probe_list, so they are never
	 * searched in find_trace_kprobe(). Therefore, there is no concern of
	 * duplicated name here.
	 */
	event = func ? func : "DUMMY_EVENT";

	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
				offs, 0 /* maxactive */, 0 /* nargs */,
				is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return ERR_CAST(tk);
	}

	init_trace_event_call(tk, &tk->tp.call);

	if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	ret = __register_trace_kprobe(tk);
	if (ret < 0) {
		/* print_fmt was allocated above; don't leak it on failure. */
		kfree(tk->tp.call.print_fmt);
		goto error;
	}
	return &tk->tp.call;
error:
	free_trace_kprobe(tk);
	return ERR_PTR(ret);
}
  1217. void destroy_local_trace_kprobe(struct trace_event_call *event_call)
  1218. {
  1219. struct trace_kprobe *tk;
  1220. tk = container_of(event_call, struct trace_kprobe, tp.call);
  1221. if (trace_probe_is_enabled(&tk->tp)) {
  1222. WARN_ON(1);
  1223. return;
  1224. }
  1225. __unregister_trace_kprobe(tk);
  1226. kfree(tk->tp.call.print_fmt);
  1227. free_trace_kprobe(tk);
  1228. }
  1229. #endif /* CONFIG_PERF_EVENTS */
/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	/* No tracefs root means tracing is unavailable; not an error here. */
	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	/* Missing tracefs files are non-fatal: warn and continue. */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);
  1253. #ifdef CONFIG_FTRACE_STARTUP_TEST
  1254. static __init struct trace_event_file *
  1255. find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
  1256. {
  1257. struct trace_event_file *file;
  1258. list_for_each_entry(file, &tr->events, list)
  1259. if (file->event_call == &tk->tp.call)
  1260. return file;
  1261. return NULL;
  1262. }
  1263. /*
  1264. * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
  1265. * stage, we can do this lockless.
  1266. */
  1267. static __init int kprobe_trace_self_tests_init(void)
  1268. {
  1269. int ret, warn = 0;
  1270. int (*target)(int, int, int, int, int, int);
  1271. struct trace_kprobe *tk;
  1272. struct trace_event_file *file;
  1273. if (tracing_is_disabled())
  1274. return -ENODEV;
  1275. target = kprobe_trace_selftest_target;
  1276. pr_info("Testing kprobe tracing: ");
  1277. ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
  1278. "$stack $stack0 +0($stack)",
  1279. create_trace_kprobe);
  1280. if (WARN_ON_ONCE(ret)) {
  1281. pr_warn("error on probing function entry.\n");
  1282. warn++;
  1283. } else {
  1284. /* Enable trace point */
  1285. tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
  1286. if (WARN_ON_ONCE(tk == NULL)) {
  1287. pr_warn("error on getting new probe.\n");
  1288. warn++;
  1289. } else {
  1290. file = find_trace_probe_file(tk, top_trace_array());
  1291. if (WARN_ON_ONCE(file == NULL)) {
  1292. pr_warn("error on getting probe file.\n");
  1293. warn++;
  1294. } else
  1295. enable_trace_kprobe(tk, file);
  1296. }
  1297. }
  1298. ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
  1299. "$retval", create_trace_kprobe);
  1300. if (WARN_ON_ONCE(ret)) {
  1301. pr_warn("error on probing function return.\n");
  1302. warn++;
  1303. } else {
  1304. /* Enable trace point */
  1305. tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
  1306. if (WARN_ON_ONCE(tk == NULL)) {
  1307. pr_warn("error on getting 2nd new probe.\n");
  1308. warn++;
  1309. } else {
  1310. file = find_trace_probe_file(tk, top_trace_array());
  1311. if (WARN_ON_ONCE(file == NULL)) {
  1312. pr_warn("error on getting probe file.\n");
  1313. warn++;
  1314. } else
  1315. enable_trace_kprobe(tk, file);
  1316. }
  1317. }
  1318. if (warn)
  1319. goto end;
  1320. ret = target(1, 2, 3, 4, 5, 6);
  1321. /*
  1322. * Not expecting an error here, the check is only to prevent the
  1323. * optimizer from removing the call to target() as otherwise there
  1324. * are no side-effects and the call is never performed.
  1325. */
  1326. if (ret != 21)
  1327. warn++;
  1328. /* Disable trace points before removing it */
  1329. tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
  1330. if (WARN_ON_ONCE(tk == NULL)) {
  1331. pr_warn("error on getting test probe.\n");
  1332. warn++;
  1333. } else {
  1334. if (trace_kprobe_nhit(tk) != 1) {
  1335. pr_warn("incorrect number of testprobe hits\n");
  1336. warn++;
  1337. }
  1338. file = find_trace_probe_file(tk, top_trace_array());
  1339. if (WARN_ON_ONCE(file == NULL)) {
  1340. pr_warn("error on getting probe file.\n");
  1341. warn++;
  1342. } else
  1343. disable_trace_kprobe(tk, file);
  1344. }
  1345. tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
  1346. if (WARN_ON_ONCE(tk == NULL)) {
  1347. pr_warn("error on getting 2nd test probe.\n");
  1348. warn++;
  1349. } else {
  1350. if (trace_kprobe_nhit(tk) != 1) {
  1351. pr_warn("incorrect number of testprobe2 hits\n");
  1352. warn++;
  1353. }
  1354. file = find_trace_probe_file(tk, top_trace_array());
  1355. if (WARN_ON_ONCE(file == NULL)) {
  1356. pr_warn("error on getting probe file.\n");
  1357. warn++;
  1358. } else
  1359. disable_trace_kprobe(tk, file);
  1360. }
  1361. ret = trace_run_command("-:testprobe", create_trace_kprobe);
  1362. if (WARN_ON_ONCE(ret)) {
  1363. pr_warn("error on deleting a probe.\n");
  1364. warn++;
  1365. }
  1366. ret = trace_run_command("-:testprobe2", create_trace_kprobe);
  1367. if (WARN_ON_ONCE(ret)) {
  1368. pr_warn("error on deleting a probe.\n");
  1369. warn++;
  1370. }
  1371. end:
  1372. release_all_trace_kprobes();
  1373. /*
  1374. * Wait for the optimizer work to finish. Otherwise it might fiddle
  1375. * with probes in already freed __init text.
  1376. */
  1377. wait_for_kprobe_optimizer();
  1378. if (warn)
  1379. pr_cont("NG: Some tests are failed. Please check them.\n");
  1380. else
  1381. pr_cont("OK\n");
  1382. return 0;
  1383. }
  1384. late_initcall(kprobe_trace_self_tests_init);
  1385. #endif