trace_uprobe.c

/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};
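
/*
 * vaddr[] holds one slot for an entry probe (the probed address) and two
 * for a return probe (function address + return address); the SIZEOF/DATAOF
 * macros below account for this when locating the fetched argument data
 * that follows the header.
 */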
#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void *)(entry) + SIZEOF_TRACE_ENTRY(is_return))
struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *)addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
				((unsigned long)offset));		\
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *)addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}
DEFINE_BASIC_FETCH_FUNCS(memory)

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *)addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);
	if (ret == maxlen)
		dst[--ret] = '\0';

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}

static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *)addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)	/* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
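
/*
 * The file_offset fetch type takes an offset into the probed binary.
 * At probe hit time the dispatcher stashes a uprobe_dispatch_data in
 * current->utask->vaddr (see uprobe_dispatcher below); subtracting the
 * probe's own file offset from the breakpoint address recovers the
 * mapping base, from which any file offset becomes a user vaddr.
 */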
static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}

#define DEFINE_FETCH_file_offset(type)					\
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
					       void *offset, void *dest)\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)

/* Fetch type information table */
static const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	path_put(&tu->path);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event: the caller must hold uprobe_lock */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
			tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
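/*
 * For example (hypothetical path, offset and fetch args), writing to the
 * uprobe_events file in tracefs:
 *
 *   p:mygrp/myevent /bin/bash:0x4245c0 %ip arg1=+0(%sp)
 *   -:mygrp/myevent
 *
 * The first line creates an entry probe at offset 0x4245c0 in /bin/bash
 * with two fetch args; the second line removes that event again.
 */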
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(argv[1], ':');
	if (!arg)
		return -EINVAL;

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		return ret;

	if (!d_is_reg(path.dentry)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->path = path;
	tu->filename = kstrdup(filename, GFP_KERNEL);
	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false,
						 uprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	path_put(&path);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
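
/*
 * Fetched argument data is staged in page-sized per-cpu buffers before
 * being copied into the trace or perf buffers.  The pool is reference
 * counted by uprobe_buffer_enable()/uprobe_buffer_disable() (both called
 * under event_mutex), and each buffer's mutex guards against the task
 * migrating CPUs while it still holds the buffer.
 */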
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto out;
	}

	trace_seq_putc(s, '\n');

out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);
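
/*
 * A trace_uprobe can be enabled either from ftrace (file != NULL, the
 * TP_FLAG_TRACE path) or from perf (file == NULL, the TP_FLAG_PROFILE
 * path), but not by both at once.  The underlying uprobe is registered
 * only on the first enable and unregistered again on the last disable.
 */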
static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		goto err_buffer;

	return 0;

err_buffer:
	uprobe_buffer_disable();

err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_sched();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->inode = NULL;
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}

int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = event->tp_event->data;
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */
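
/*
 * Single ->reg() callback for the trace_event_call: it multiplexes the
 * ftrace and perf register/unregister requests and, for perf, the
 * per-event open/close used to maintain the mm filter.
 */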
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}
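
/*
 * Breakpoint-hit entry point.  A pointer to the dispatch data is stashed
 * in current->utask->vaddr so that the file_offset fetch functions can
 * recover the mapping base via translate_user_vaddr() while the probe's
 * arguments are being stored.
 */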
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static inline void init_trace_event_call(struct trace_uprobe *tu,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret = 0;

	init_trace_event_call(tu, call);

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to uprobe_list, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu, &tu->tp.call);

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return &tu->tp.call;
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = container_of(event_call, struct trace_uprobe, tp.call);

	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;

	free_trace_uprobe(tu);
}
#endif	/* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);