trace_uprobe.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * uprobes-based tracing events
  4. *
  5. * Copyright (C) IBM Corporation, 2010-2012
  6. * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
  7. */
  8. #define pr_fmt(fmt) "trace_uprobe: " fmt
  9. #include <linux/module.h>
  10. #include <linux/uaccess.h>
  11. #include <linux/uprobes.h>
  12. #include <linux/namei.h>
  13. #include <linux/string.h>
  14. #include <linux/rculist.h>
  15. #include "trace_probe.h"
  16. #include "trace_probe_tmpl.h"
  17. #define UPROBE_EVENT_SYSTEM "uprobes"
  18. struct uprobe_trace_entry_head {
  19. struct trace_entry ent;
  20. unsigned long vaddr[];
  21. };
  22. #define SIZEOF_TRACE_ENTRY(is_return) \
  23. (sizeof(struct uprobe_trace_entry_head) + \
  24. sizeof(unsigned long) * (is_return ? 2 : 1))
  25. #define DATAOF_TRACE_ENTRY(entry, is_return) \
  26. ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
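/*
 * Layout sketch for the records sized by the two macros above, as they are
 * filled in by __uprobe_trace_func() and __uprobe_perf_func() further down
 * (illustrative only):
 *
 *   entry probe:   [ trace_entry | vaddr[0] = probed IP                  | fetched args... ]
 *   return probe:  [ trace_entry | vaddr[0] = func, vaddr[1] = return IP | fetched args... ]
 *
 * DATAOF_TRACE_ENTRY() points just past the vaddr[] slots, which is where
 * the fetched argument data is copied.
 */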
  27. struct trace_uprobe_filter {
  28. rwlock_t rwlock;
  29. int nr_systemwide;
  30. struct list_head perf_events;
  31. };
  32. /*
  33. * uprobe event core functions
  34. */
  35. struct trace_uprobe {
  36. struct list_head list;
  37. struct trace_uprobe_filter filter;
  38. struct uprobe_consumer consumer;
  39. struct path path;
  40. struct inode *inode;
  41. char *filename;
  42. unsigned long offset;
  43. unsigned long ref_ctr_offset;
  44. unsigned long nhit;
  45. struct trace_probe tp;
  46. };
  47. #define SIZEOF_TRACE_UPROBE(n) \
  48. (offsetof(struct trace_uprobe, tp.args) + \
  49. (sizeof(struct probe_arg) * (n)))
  50. static int register_uprobe_event(struct trace_uprobe *tu);
  51. static int unregister_uprobe_event(struct trace_uprobe *tu);
  52. static DEFINE_MUTEX(uprobe_lock);
  53. static LIST_HEAD(uprobe_list);
  54. struct uprobe_dispatch_data {
  55. struct trace_uprobe *tu;
  56. unsigned long bp_addr;
  57. };
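/*
 * The dispatchers below stash a struct uprobe_dispatch_data on their stack
 * and publish it through current->utask->vaddr for the duration of the
 * handler, so that translate_user_vaddr() can recover the executable's
 * load base (bp_addr - tu->offset) when fetching file-offset arguments.
 */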
  58. static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
  59. static int uretprobe_dispatcher(struct uprobe_consumer *con,
  60. unsigned long func, struct pt_regs *regs);
  61. #ifdef CONFIG_STACK_GROWSUP
  62. static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
  63. {
  64. return addr - (n * sizeof(long));
  65. }
  66. #else
  67. static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
  68. {
  69. return addr + (n * sizeof(long));
  70. }
  71. #endif
  72. static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
  73. {
  74. unsigned long ret;
  75. unsigned long addr = user_stack_pointer(regs);
  76. addr = adjust_stack_addr(addr, n);
  77. if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
  78. return 0;
  79. return ret;
  80. }
  81. /*
  82. * Uprobes-specific fetch functions
  83. */
  84. static nokprobe_inline int
  85. probe_mem_read(void *dest, void *src, size_t size)
  86. {
  87. void __user *vaddr = (void __force __user *)src;
  88. return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
  89. }
  90. /*
  91. * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
  92. * length and relative data location.
  93. */
  94. static nokprobe_inline int
  95. fetch_store_string(unsigned long addr, void *dest, void *base)
  96. {
  97. long ret;
  98. u32 loc = *(u32 *)dest;
  99. int maxlen = get_loc_len(loc);
  100. u8 *dst = get_loc_data(dest, base);
  101. void __user *src = (void __force __user *) addr;
  102. if (unlikely(!maxlen))
  103. return -ENOMEM;
  104. ret = strncpy_from_user(dst, src, maxlen);
  105. if (ret >= 0) {
  106. if (ret == maxlen)
  107. dst[ret - 1] = '\0';
  108. *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
  109. }
  110. return ret;
  111. }
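/*
 * A sketch of the data_loc convention assumed above (the helpers come from
 * trace_probe.h, which is not shown here): the u32 at *dest packs the
 * length (the maximum on input, the copied length on output) in the upper
 * 16 bits and the offset of the string data relative to 'base' in the
 * lower 16 bits, e.g. a 5-byte string stored 32 bytes into the dynamic
 * area would be encoded roughly as (5 << 16) | 32.
 */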
  112. /* Return the length of the string, including the terminating null byte */
  113. static nokprobe_inline int
  114. fetch_store_strlen(unsigned long addr)
  115. {
  116. int len;
  117. void __user *vaddr = (void __force __user *) addr;
  118. len = strnlen_user(vaddr, MAX_STRING_SIZE);
  119. return (len > MAX_STRING_SIZE) ? 0 : len;
  120. }
  121. static unsigned long translate_user_vaddr(unsigned long file_offset)
  122. {
  123. unsigned long base_addr;
  124. struct uprobe_dispatch_data *udd;
  125. udd = (void *) current->utask->vaddr;
  126. base_addr = udd->bp_addr - udd->tu->offset;
  127. return base_addr + file_offset;
  128. }
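/*
 * Worked example with illustrative addresses: if the probe sits at file
 * offset 0x4710 (tu->offset) and the breakpoint fired at virtual address
 * 0x55d2c0a04710 (udd->bp_addr), the executable's load base works out to
 * 0x55d2c0a00000, so an argument specified as file offset 0x20 is fetched
 * from user address 0x55d2c0a00020.
 */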
  129. /* Note that we don't verify it, since the code does not come from user space */
  130. static int
  131. process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
  132. void *base)
  133. {
  134. unsigned long val;
  135. /* 1st stage: get value from context */
  136. switch (code->op) {
  137. case FETCH_OP_REG:
  138. val = regs_get_register(regs, code->param);
  139. break;
  140. case FETCH_OP_STACK:
  141. val = get_user_stack_nth(regs, code->param);
  142. break;
  143. case FETCH_OP_STACKP:
  144. val = user_stack_pointer(regs);
  145. break;
  146. case FETCH_OP_RETVAL:
  147. val = regs_return_value(regs);
  148. break;
  149. case FETCH_OP_IMM:
  150. val = code->immediate;
  151. break;
  152. case FETCH_OP_FOFFS:
  153. val = translate_user_vaddr(code->immediate);
  154. break;
  155. default:
  156. return -EILSEQ;
  157. }
  158. code++;
  159. return process_fetch_insn_bottom(code, val, dest, base);
  160. }
  161. NOKPROBE_SYMBOL(process_fetch_insn)
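/*
 * Rough mapping between the fetch ops handled above and the fetch-arg
 * syntax accepted on the uprobe_events command line (the actual parsing
 * lives in trace_probe.c, so treat this as an informal guide):
 *
 *   FETCH_OP_REG    <- %<register>       e.g. %ax, %di (x86 names shown)
 *   FETCH_OP_STACK  <- $stackN           N-th entry on the user stack
 *   FETCH_OP_STACKP <- $stack            the user stack pointer itself
 *   FETCH_OP_RETVAL <- $retval           return value (return probes only)
 *   FETCH_OP_IMM    <- \<immediate>      a literal value
 *   FETCH_OP_FOFFS  <- @+<file-offset>   memory at a file offset
 */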
  162. static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
  163. {
  164. rwlock_init(&filter->rwlock);
  165. filter->nr_systemwide = 0;
  166. INIT_LIST_HEAD(&filter->perf_events);
  167. }
  168. static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
  169. {
  170. return !filter->nr_systemwide && list_empty(&filter->perf_events);
  171. }
  172. static inline bool is_ret_probe(struct trace_uprobe *tu)
  173. {
  174. return tu->consumer.ret_handler != NULL;
  175. }
  176. /*
  177. * Allocate new trace_uprobe and initialize it (including uprobes).
  178. */
  179. static struct trace_uprobe *
  180. alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
  181. {
  182. struct trace_uprobe *tu;
  183. if (!event || !is_good_name(event))
  184. return ERR_PTR(-EINVAL);
  185. if (!group || !is_good_name(group))
  186. return ERR_PTR(-EINVAL);
  187. tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
  188. if (!tu)
  189. return ERR_PTR(-ENOMEM);
  190. tu->tp.call.class = &tu->tp.class;
  191. tu->tp.call.name = kstrdup(event, GFP_KERNEL);
  192. if (!tu->tp.call.name)
  193. goto error;
  194. tu->tp.class.system = kstrdup(group, GFP_KERNEL);
  195. if (!tu->tp.class.system)
  196. goto error;
  197. INIT_LIST_HEAD(&tu->list);
  198. INIT_LIST_HEAD(&tu->tp.files);
  199. tu->consumer.handler = uprobe_dispatcher;
  200. if (is_ret)
  201. tu->consumer.ret_handler = uretprobe_dispatcher;
  202. init_trace_uprobe_filter(&tu->filter);
  203. return tu;
  204. error:
  205. kfree(tu->tp.call.name);
  206. kfree(tu);
  207. return ERR_PTR(-ENOMEM);
  208. }
  209. static void free_trace_uprobe(struct trace_uprobe *tu)
  210. {
  211. int i;
  212. for (i = 0; i < tu->tp.nr_args; i++)
  213. traceprobe_free_probe_arg(&tu->tp.args[i]);
  214. path_put(&tu->path);
  215. kfree(tu->tp.call.class->system);
  216. kfree(tu->tp.call.name);
  217. kfree(tu->filename);
  218. kfree(tu);
  219. }
  220. static struct trace_uprobe *find_probe_event(const char *event, const char *group)
  221. {
  222. struct trace_uprobe *tu;
  223. list_for_each_entry(tu, &uprobe_list, list)
  224. if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
  225. strcmp(tu->tp.call.class->system, group) == 0)
  226. return tu;
  227. return NULL;
  228. }
  229. /* Unregister a trace_uprobe and probe_event; call with uprobe_lock held */
  230. static int unregister_trace_uprobe(struct trace_uprobe *tu)
  231. {
  232. int ret;
  233. ret = unregister_uprobe_event(tu);
  234. if (ret)
  235. return ret;
  236. list_del(&tu->list);
  237. free_trace_uprobe(tu);
  238. return 0;
  239. }
  240. /*
  241. * A uprobe with multiple reference counters is not allowed, i.e.
  242. * if the inode and offset match, the reference counter offset *must*
  243. * match as well. There is one exception: if the user is replacing
  244. * an old trace_uprobe with a new one (same group/event), we allow
  245. * the same uprobe to take a new reference counter, as long as
  246. * the new one does not conflict with any other existing
  247. * ones.
  248. */
  249. static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
  250. {
  251. struct trace_uprobe *tmp, *old = NULL;
  252. struct inode *new_inode = d_real_inode(new->path.dentry);
  253. old = find_probe_event(trace_event_name(&new->tp.call),
  254. new->tp.call.class->system);
  255. list_for_each_entry(tmp, &uprobe_list, list) {
  256. if ((old ? old != tmp : true) &&
  257. new_inode == d_real_inode(tmp->path.dentry) &&
  258. new->offset == tmp->offset &&
  259. new->ref_ctr_offset != tmp->ref_ctr_offset) {
  260. pr_warn("Reference counter offset mismatch.");
  261. return ERR_PTR(-EINVAL);
  262. }
  263. }
  264. return old;
  265. }
  266. /* Register a trace_uprobe and probe_event */
  267. static int register_trace_uprobe(struct trace_uprobe *tu)
  268. {
  269. struct trace_uprobe *old_tu;
  270. int ret;
  271. mutex_lock(&uprobe_lock);
  272. /* register as an event */
  273. old_tu = find_old_trace_uprobe(tu);
  274. if (IS_ERR(old_tu)) {
  275. ret = PTR_ERR(old_tu);
  276. goto end;
  277. }
  278. if (old_tu) {
  279. /* delete old event */
  280. ret = unregister_trace_uprobe(old_tu);
  281. if (ret)
  282. goto end;
  283. }
  284. ret = register_uprobe_event(tu);
  285. if (ret) {
  286. pr_warn("Failed to register probe event(%d)\n", ret);
  287. goto end;
  288. }
  289. list_add_tail(&tu->list, &uprobe_list);
  290. end:
  291. mutex_unlock(&uprobe_lock);
  292. return ret;
  293. }
  294. /*
  295. * Argument syntax:
  296. * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
  297. *
  298. * - Remove uprobe: -:[GRP/]EVENT
  299. */
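/*
 * Example command lines accepted by this parser (paths, offsets and names
 * are purely illustrative):
 *
 *   echo 'p:myprobe /bin/bash:0x4245c0 %ax $stack0' > uprobe_events
 *   echo 'r:mygrp/myretprobe /bin/bash:0x4245c0(0x98090) $retval' > uprobe_events
 *   echo '-:mygrp/myretprobe' > uprobe_events
 *
 * The first adds an entry probe in the default "uprobes" group, the second
 * adds a return probe with a reference counter offset in group "mygrp",
 * and the third removes it again.
 */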
  300. static int create_trace_uprobe(int argc, char **argv)
  301. {
  302. struct trace_uprobe *tu;
  303. char *arg, *event, *group, *filename, *rctr, *rctr_end;
  304. char buf[MAX_EVENT_NAME_LEN];
  305. struct path path;
  306. unsigned long offset, ref_ctr_offset;
  307. bool is_delete, is_return;
  308. int i, ret;
  309. ret = 0;
  310. is_delete = false;
  311. is_return = false;
  312. event = NULL;
  313. group = NULL;
  314. ref_ctr_offset = 0;
  315. /* argc must be >= 1 */
  316. if (argv[0][0] == '-')
  317. is_delete = true;
  318. else if (argv[0][0] == 'r')
  319. is_return = true;
  320. else if (argv[0][0] != 'p') {
  321. pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
  322. return -EINVAL;
  323. }
  324. if (argv[0][1] == ':') {
  325. event = &argv[0][2];
  326. arg = strchr(event, '/');
  327. if (arg) {
  328. group = event;
  329. event = arg + 1;
  330. event[-1] = '\0';
  331. if (strlen(group) == 0) {
  332. pr_info("Group name is not specified\n");
  333. return -EINVAL;
  334. }
  335. }
  336. if (strlen(event) == 0) {
  337. pr_info("Event name is not specified\n");
  338. return -EINVAL;
  339. }
  340. }
  341. if (!group)
  342. group = UPROBE_EVENT_SYSTEM;
  343. if (is_delete) {
  344. int ret;
  345. if (!event) {
  346. pr_info("Delete command needs an event name.\n");
  347. return -EINVAL;
  348. }
  349. mutex_lock(&uprobe_lock);
  350. tu = find_probe_event(event, group);
  351. if (!tu) {
  352. mutex_unlock(&uprobe_lock);
  353. pr_info("Event %s/%s doesn't exist.\n", group, event);
  354. return -ENOENT;
  355. }
  356. /* delete an event */
  357. ret = unregister_trace_uprobe(tu);
  358. mutex_unlock(&uprobe_lock);
  359. return ret;
  360. }
  361. if (argc < 2) {
  362. pr_info("Probe point is not specified.\n");
  363. return -EINVAL;
  364. }
  365. /* Find the last occurrence, in case the path contains ':' too. */
  366. arg = strrchr(argv[1], ':');
  367. if (!arg)
  368. return -EINVAL;
  369. *arg++ = '\0';
  370. filename = argv[1];
  371. ret = kern_path(filename, LOOKUP_FOLLOW, &path);
  372. if (ret)
  373. return ret;
  374. if (!d_is_reg(path.dentry)) {
  375. ret = -EINVAL;
  376. goto fail_address_parse;
  377. }
  378. /* Parse reference counter offset if specified. */
  379. rctr = strchr(arg, '(');
  380. if (rctr) {
  381. rctr_end = strchr(rctr, ')');
  382. if (rctr > rctr_end || *(rctr_end + 1) != 0) {
  383. ret = -EINVAL;
  384. pr_info("Invalid reference counter offset.\n");
  385. goto fail_address_parse;
  386. }
  387. *rctr++ = '\0';
  388. *rctr_end = '\0';
  389. ret = kstrtoul(rctr, 0, &ref_ctr_offset);
  390. if (ret) {
  391. pr_info("Invalid reference counter offset.\n");
  392. goto fail_address_parse;
  393. }
  394. }
  395. /* Parse uprobe offset. */
  396. ret = kstrtoul(arg, 0, &offset);
  397. if (ret)
  398. goto fail_address_parse;
  399. argc -= 2;
  400. argv += 2;
  401. /* setup a probe */
  402. if (!event) {
  403. char *tail;
  404. char *ptr;
  405. tail = kstrdup(kbasename(filename), GFP_KERNEL);
  406. if (!tail) {
  407. ret = -ENOMEM;
  408. goto fail_address_parse;
  409. }
  410. ptr = strpbrk(tail, ".-_");
  411. if (ptr)
  412. *ptr = '\0';
  413. snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
  414. event = buf;
  415. kfree(tail);
  416. }
  417. tu = alloc_trace_uprobe(group, event, argc, is_return);
  418. if (IS_ERR(tu)) {
  419. pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
  420. ret = PTR_ERR(tu);
  421. goto fail_address_parse;
  422. }
  423. tu->offset = offset;
  424. tu->ref_ctr_offset = ref_ctr_offset;
  425. tu->path = path;
  426. tu->filename = kstrdup(filename, GFP_KERNEL);
  427. if (!tu->filename) {
  428. pr_info("Failed to allocate filename.\n");
  429. ret = -ENOMEM;
  430. goto error;
  431. }
  432. /* parse arguments */
  433. ret = 0;
  434. for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
  435. struct probe_arg *parg = &tu->tp.args[i];
  436. /* Increment count for freeing args in error case */
  437. tu->tp.nr_args++;
  438. /* Parse argument name */
  439. arg = strchr(argv[i], '=');
  440. if (arg) {
  441. *arg++ = '\0';
  442. parg->name = kstrdup(argv[i], GFP_KERNEL);
  443. } else {
  444. arg = argv[i];
  445. /* If argument name is omitted, set "argN" */
  446. snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
  447. parg->name = kstrdup(buf, GFP_KERNEL);
  448. }
  449. if (!parg->name) {
  450. pr_info("Failed to allocate argument[%d] name.\n", i);
  451. ret = -ENOMEM;
  452. goto error;
  453. }
  454. if (!is_good_name(parg->name)) {
  455. pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
  456. ret = -EINVAL;
  457. goto error;
  458. }
  459. if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
  460. pr_info("Argument[%d] name '%s' conflicts with "
  461. "another field.\n", i, argv[i]);
  462. ret = -EINVAL;
  463. goto error;
  464. }
  465. /* Parse fetch argument */
  466. ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
  467. is_return ? TPARG_FL_RETURN : 0);
  468. if (ret) {
  469. pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
  470. goto error;
  471. }
  472. }
  473. ret = register_trace_uprobe(tu);
  474. if (ret)
  475. goto error;
  476. return 0;
  477. error:
  478. free_trace_uprobe(tu);
  479. return ret;
  480. fail_address_parse:
  481. path_put(&path);
  482. pr_info("Failed to parse address or file.\n");
  483. return ret;
  484. }
  485. static int cleanup_all_probes(void)
  486. {
  487. struct trace_uprobe *tu;
  488. int ret = 0;
  489. mutex_lock(&uprobe_lock);
  490. while (!list_empty(&uprobe_list)) {
  491. tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
  492. ret = unregister_trace_uprobe(tu);
  493. if (ret)
  494. break;
  495. }
  496. mutex_unlock(&uprobe_lock);
  497. return ret;
  498. }
  499. /* Probes listing interfaces */
  500. static void *probes_seq_start(struct seq_file *m, loff_t *pos)
  501. {
  502. mutex_lock(&uprobe_lock);
  503. return seq_list_start(&uprobe_list, *pos);
  504. }
  505. static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
  506. {
  507. return seq_list_next(v, &uprobe_list, pos);
  508. }
  509. static void probes_seq_stop(struct seq_file *m, void *v)
  510. {
  511. mutex_unlock(&uprobe_lock);
  512. }
  513. static int probes_seq_show(struct seq_file *m, void *v)
  514. {
  515. struct trace_uprobe *tu = v;
  516. char c = is_ret_probe(tu) ? 'r' : 'p';
  517. int i;
  518. seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
  519. trace_event_name(&tu->tp.call), tu->filename,
  520. (int)(sizeof(void *) * 2), tu->offset);
  521. if (tu->ref_ctr_offset)
  522. seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
  523. for (i = 0; i < tu->tp.nr_args; i++)
  524. seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
  525. seq_putc(m, '\n');
  526. return 0;
  527. }
  528. static const struct seq_operations probes_seq_op = {
  529. .start = probes_seq_start,
  530. .next = probes_seq_next,
  531. .stop = probes_seq_stop,
  532. .show = probes_seq_show
  533. };
  534. static int probes_open(struct inode *inode, struct file *file)
  535. {
  536. int ret;
  537. if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
  538. ret = cleanup_all_probes();
  539. if (ret)
  540. return ret;
  541. }
  542. return seq_open(file, &probes_seq_op);
  543. }
  544. static ssize_t probes_write(struct file *file, const char __user *buffer,
  545. size_t count, loff_t *ppos)
  546. {
  547. return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
  548. }
  549. static const struct file_operations uprobe_events_ops = {
  550. .owner = THIS_MODULE,
  551. .open = probes_open,
  552. .read = seq_read,
  553. .llseek = seq_lseek,
  554. .release = seq_release,
  555. .write = probes_write,
  556. };
  557. /* Probes profiling interfaces */
  558. static int probes_profile_seq_show(struct seq_file *m, void *v)
  559. {
  560. struct trace_uprobe *tu = v;
  561. seq_printf(m, " %s %-44s %15lu\n", tu->filename,
  562. trace_event_name(&tu->tp.call), tu->nhit);
  563. return 0;
  564. }
  565. static const struct seq_operations profile_seq_op = {
  566. .start = probes_seq_start,
  567. .next = probes_seq_next,
  568. .stop = probes_seq_stop,
  569. .show = probes_profile_seq_show
  570. };
  571. static int profile_open(struct inode *inode, struct file *file)
  572. {
  573. return seq_open(file, &profile_seq_op);
  574. }
  575. static const struct file_operations uprobe_profile_ops = {
  576. .owner = THIS_MODULE,
  577. .open = profile_open,
  578. .read = seq_read,
  579. .llseek = seq_lseek,
  580. .release = seq_release,
  581. };
  582. struct uprobe_cpu_buffer {
  583. struct mutex mutex;
  584. void *buf;
  585. };
  586. static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
  587. static int uprobe_buffer_refcnt;
  588. static int uprobe_buffer_init(void)
  589. {
  590. int cpu, err_cpu;
  591. uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
  592. if (uprobe_cpu_buffer == NULL)
  593. return -ENOMEM;
  594. for_each_possible_cpu(cpu) {
  595. struct page *p = alloc_pages_node(cpu_to_node(cpu),
  596. GFP_KERNEL, 0);
  597. if (p == NULL) {
  598. err_cpu = cpu;
  599. goto err;
  600. }
  601. per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
  602. mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
  603. }
  604. return 0;
  605. err:
  606. for_each_possible_cpu(cpu) {
  607. if (cpu == err_cpu)
  608. break;
  609. free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
  610. }
  611. free_percpu(uprobe_cpu_buffer);
  612. return -ENOMEM;
  613. }
  614. static int uprobe_buffer_enable(void)
  615. {
  616. int ret = 0;
  617. BUG_ON(!mutex_is_locked(&event_mutex));
  618. if (uprobe_buffer_refcnt++ == 0) {
  619. ret = uprobe_buffer_init();
  620. if (ret < 0)
  621. uprobe_buffer_refcnt--;
  622. }
  623. return ret;
  624. }
  625. static void uprobe_buffer_disable(void)
  626. {
  627. int cpu;
  628. BUG_ON(!mutex_is_locked(&event_mutex));
  629. if (--uprobe_buffer_refcnt == 0) {
  630. for_each_possible_cpu(cpu)
  631. free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
  632. cpu)->buf);
  633. free_percpu(uprobe_cpu_buffer);
  634. uprobe_cpu_buffer = NULL;
  635. }
  636. }
  637. static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
  638. {
  639. struct uprobe_cpu_buffer *ucb;
  640. int cpu;
  641. cpu = raw_smp_processor_id();
  642. ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
  643. /*
  644. * Use per-cpu buffers for fastest access, but we might migrate
  645. * so the mutex makes sure we have sole access to it.
  646. */
  647. mutex_lock(&ucb->mutex);
  648. return ucb;
  649. }
  650. static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
  651. {
  652. mutex_unlock(&ucb->mutex);
  653. }
  654. static void __uprobe_trace_func(struct trace_uprobe *tu,
  655. unsigned long func, struct pt_regs *regs,
  656. struct uprobe_cpu_buffer *ucb, int dsize,
  657. struct trace_event_file *trace_file)
  658. {
  659. struct uprobe_trace_entry_head *entry;
  660. struct ring_buffer_event *event;
  661. struct ring_buffer *buffer;
  662. void *data;
  663. int size, esize;
  664. struct trace_event_call *call = &tu->tp.call;
  665. WARN_ON(call != trace_file->event_call);
  666. if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
  667. return;
  668. if (trace_trigger_soft_disabled(trace_file))
  669. return;
  670. esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
  671. size = esize + tu->tp.size + dsize;
  672. event = trace_event_buffer_lock_reserve(&buffer, trace_file,
  673. call->event.type, size, 0, 0);
  674. if (!event)
  675. return;
  676. entry = ring_buffer_event_data(event);
  677. if (is_ret_probe(tu)) {
  678. entry->vaddr[0] = func;
  679. entry->vaddr[1] = instruction_pointer(regs);
  680. data = DATAOF_TRACE_ENTRY(entry, true);
  681. } else {
  682. entry->vaddr[0] = instruction_pointer(regs);
  683. data = DATAOF_TRACE_ENTRY(entry, false);
  684. }
  685. memcpy(data, ucb->buf, tu->tp.size + dsize);
  686. event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
  687. }
  688. /* uprobe handler */
  689. static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
  690. struct uprobe_cpu_buffer *ucb, int dsize)
  691. {
  692. struct event_file_link *link;
  693. if (is_ret_probe(tu))
  694. return 0;
  695. rcu_read_lock();
  696. list_for_each_entry_rcu(link, &tu->tp.files, list)
  697. __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
  698. rcu_read_unlock();
  699. return 0;
  700. }
  701. static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
  702. struct pt_regs *regs,
  703. struct uprobe_cpu_buffer *ucb, int dsize)
  704. {
  705. struct event_file_link *link;
  706. rcu_read_lock();
  707. list_for_each_entry_rcu(link, &tu->tp.files, list)
  708. __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
  709. rcu_read_unlock();
  710. }
  711. /* Event entry printers */
  712. static enum print_line_t
  713. print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
  714. {
  715. struct uprobe_trace_entry_head *entry;
  716. struct trace_seq *s = &iter->seq;
  717. struct trace_uprobe *tu;
  718. u8 *data;
  719. entry = (struct uprobe_trace_entry_head *)iter->ent;
  720. tu = container_of(event, struct trace_uprobe, tp.call.event);
  721. if (is_ret_probe(tu)) {
  722. trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
  723. trace_event_name(&tu->tp.call),
  724. entry->vaddr[1], entry->vaddr[0]);
  725. data = DATAOF_TRACE_ENTRY(entry, true);
  726. } else {
  727. trace_seq_printf(s, "%s: (0x%lx)",
  728. trace_event_name(&tu->tp.call),
  729. entry->vaddr[0]);
  730. data = DATAOF_TRACE_ENTRY(entry, false);
  731. }
  732. if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
  733. goto out;
  734. trace_seq_putc(s, '\n');
  735. out:
  736. return trace_handle_return(s);
  737. }
  738. typedef bool (*filter_func_t)(struct uprobe_consumer *self,
  739. enum uprobe_filter_ctx ctx,
  740. struct mm_struct *mm);
  741. static int
  742. probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
  743. filter_func_t filter)
  744. {
  745. bool enabled = trace_probe_is_enabled(&tu->tp);
  746. struct event_file_link *link = NULL;
  747. int ret;
  748. if (file) {
  749. if (tu->tp.flags & TP_FLAG_PROFILE)
  750. return -EINTR;
  751. link = kmalloc(sizeof(*link), GFP_KERNEL);
  752. if (!link)
  753. return -ENOMEM;
  754. link->file = file;
  755. list_add_tail_rcu(&link->list, &tu->tp.files);
  756. tu->tp.flags |= TP_FLAG_TRACE;
  757. } else {
  758. if (tu->tp.flags & TP_FLAG_TRACE)
  759. return -EINTR;
  760. tu->tp.flags |= TP_FLAG_PROFILE;
  761. }
  762. WARN_ON(!uprobe_filter_is_empty(&tu->filter));
  763. if (enabled)
  764. return 0;
  765. ret = uprobe_buffer_enable();
  766. if (ret)
  767. goto err_flags;
  768. tu->consumer.filter = filter;
  769. tu->inode = d_real_inode(tu->path.dentry);
  770. if (tu->ref_ctr_offset) {
  771. ret = uprobe_register_refctr(tu->inode, tu->offset,
  772. tu->ref_ctr_offset, &tu->consumer);
  773. } else {
  774. ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
  775. }
  776. if (ret)
  777. goto err_buffer;
  778. return 0;
  779. err_buffer:
  780. uprobe_buffer_disable();
  781. err_flags:
  782. if (file) {
  783. list_del(&link->list);
  784. kfree(link);
  785. tu->tp.flags &= ~TP_FLAG_TRACE;
  786. } else {
  787. tu->tp.flags &= ~TP_FLAG_PROFILE;
  788. }
  789. return ret;
  790. }
  791. static void
  792. probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
  793. {
  794. if (!trace_probe_is_enabled(&tu->tp))
  795. return;
  796. if (file) {
  797. struct event_file_link *link;
  798. link = find_event_file_link(&tu->tp, file);
  799. if (!link)
  800. return;
  801. list_del_rcu(&link->list);
  802. /* synchronize with u{,ret}probe_trace_func */
  803. synchronize_rcu();
  804. kfree(link);
  805. if (!list_empty(&tu->tp.files))
  806. return;
  807. }
  808. WARN_ON(!uprobe_filter_is_empty(&tu->filter));
  809. uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
  810. tu->inode = NULL;
  811. tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
  812. uprobe_buffer_disable();
  813. }
  814. static int uprobe_event_define_fields(struct trace_event_call *event_call)
  815. {
  816. int ret, size;
  817. struct uprobe_trace_entry_head field;
  818. struct trace_uprobe *tu = event_call->data;
  819. if (is_ret_probe(tu)) {
  820. DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
  821. DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
  822. size = SIZEOF_TRACE_ENTRY(true);
  823. } else {
  824. DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
  825. size = SIZEOF_TRACE_ENTRY(false);
  826. }
  827. return traceprobe_define_arg_fields(event_call, size, &tu->tp);
  828. }
  829. #ifdef CONFIG_PERF_EVENTS
  830. static bool
  831. __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
  832. {
  833. struct perf_event *event;
  834. if (filter->nr_systemwide)
  835. return true;
  836. list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
  837. if (event->hw.target->mm == mm)
  838. return true;
  839. }
  840. return false;
  841. }
  842. static inline bool
  843. uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
  844. {
  845. return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
  846. }
  847. static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
  848. {
  849. bool done;
  850. write_lock(&tu->filter.rwlock);
  851. if (event->hw.target) {
  852. list_del(&event->hw.tp_list);
  853. done = tu->filter.nr_systemwide ||
  854. (event->hw.target->flags & PF_EXITING) ||
  855. uprobe_filter_event(tu, event);
  856. } else {
  857. tu->filter.nr_systemwide--;
  858. done = tu->filter.nr_systemwide;
  859. }
  860. write_unlock(&tu->filter.rwlock);
  861. if (!done)
  862. return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
  863. return 0;
  864. }
  865. static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
  866. {
  867. bool done;
  868. int err;
  869. write_lock(&tu->filter.rwlock);
  870. if (event->hw.target) {
  871. /*
  872. * event->parent != NULL means copy_process(), we can avoid
  873. * uprobe_apply(). current->mm must be probed and we can rely
  874. * on dup_mmap() which preserves the already installed bp's.
  875. *
  876. * attr.enable_on_exec means that exec/mmap will install the
  877. * breakpoints we need.
  878. */
  879. done = tu->filter.nr_systemwide ||
  880. event->parent || event->attr.enable_on_exec ||
  881. uprobe_filter_event(tu, event);
  882. list_add(&event->hw.tp_list, &tu->filter.perf_events);
  883. } else {
  884. done = tu->filter.nr_systemwide;
  885. tu->filter.nr_systemwide++;
  886. }
  887. write_unlock(&tu->filter.rwlock);
  888. err = 0;
  889. if (!done) {
  890. err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
  891. if (err)
  892. uprobe_perf_close(tu, event);
  893. }
  894. return err;
  895. }
  896. static bool uprobe_perf_filter(struct uprobe_consumer *uc,
  897. enum uprobe_filter_ctx ctx, struct mm_struct *mm)
  898. {
  899. struct trace_uprobe *tu;
  900. int ret;
  901. tu = container_of(uc, struct trace_uprobe, consumer);
  902. read_lock(&tu->filter.rwlock);
  903. ret = __uprobe_perf_filter(&tu->filter, mm);
  904. read_unlock(&tu->filter.rwlock);
  905. return ret;
  906. }
  907. static void __uprobe_perf_func(struct trace_uprobe *tu,
  908. unsigned long func, struct pt_regs *regs,
  909. struct uprobe_cpu_buffer *ucb, int dsize)
  910. {
  911. struct trace_event_call *call = &tu->tp.call;
  912. struct uprobe_trace_entry_head *entry;
  913. struct hlist_head *head;
  914. void *data;
  915. int size, esize;
  916. int rctx;
  917. if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
  918. return;
  919. esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
  920. size = esize + tu->tp.size + dsize;
  921. size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
  922. if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
  923. return;
  924. preempt_disable();
  925. head = this_cpu_ptr(call->perf_events);
  926. if (hlist_empty(head))
  927. goto out;
  928. entry = perf_trace_buf_alloc(size, NULL, &rctx);
  929. if (!entry)
  930. goto out;
  931. if (is_ret_probe(tu)) {
  932. entry->vaddr[0] = func;
  933. entry->vaddr[1] = instruction_pointer(regs);
  934. data = DATAOF_TRACE_ENTRY(entry, true);
  935. } else {
  936. entry->vaddr[0] = instruction_pointer(regs);
  937. data = DATAOF_TRACE_ENTRY(entry, false);
  938. }
  939. memcpy(data, ucb->buf, tu->tp.size + dsize);
  940. if (size - esize > tu->tp.size + dsize) {
  941. int len = tu->tp.size + dsize;
  942. memset(data + len, 0, size - esize - len);
  943. }
  944. perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
  945. head, NULL);
  946. out:
  947. preempt_enable();
  948. }
  949. /* uprobe profile handler */
  950. static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
  951. struct uprobe_cpu_buffer *ucb, int dsize)
  952. {
  953. if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
  954. return UPROBE_HANDLER_REMOVE;
  955. if (!is_ret_probe(tu))
  956. __uprobe_perf_func(tu, 0, regs, ucb, dsize);
  957. return 0;
  958. }
  959. static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
  960. struct pt_regs *regs,
  961. struct uprobe_cpu_buffer *ucb, int dsize)
  962. {
  963. __uprobe_perf_func(tu, func, regs, ucb, dsize);
  964. }
  965. int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
  966. const char **filename, u64 *probe_offset,
  967. bool perf_type_tracepoint)
  968. {
  969. const char *pevent = trace_event_name(event->tp_event);
  970. const char *group = event->tp_event->class->system;
  971. struct trace_uprobe *tu;
  972. if (perf_type_tracepoint)
  973. tu = find_probe_event(pevent, group);
  974. else
  975. tu = event->tp_event->data;
  976. if (!tu)
  977. return -EINVAL;
  978. *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
  979. : BPF_FD_TYPE_UPROBE;
  980. *filename = tu->filename;
  981. *probe_offset = tu->offset;
  982. return 0;
  983. }
  984. #endif /* CONFIG_PERF_EVENTS */
  985. static int
  986. trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
  987. void *data)
  988. {
  989. struct trace_uprobe *tu = event->data;
  990. struct trace_event_file *file = data;
  991. switch (type) {
  992. case TRACE_REG_REGISTER:
  993. return probe_event_enable(tu, file, NULL);
  994. case TRACE_REG_UNREGISTER:
  995. probe_event_disable(tu, file);
  996. return 0;
  997. #ifdef CONFIG_PERF_EVENTS
  998. case TRACE_REG_PERF_REGISTER:
  999. return probe_event_enable(tu, NULL, uprobe_perf_filter);
  1000. case TRACE_REG_PERF_UNREGISTER:
  1001. probe_event_disable(tu, NULL);
  1002. return 0;
  1003. case TRACE_REG_PERF_OPEN:
  1004. return uprobe_perf_open(tu, data);
  1005. case TRACE_REG_PERF_CLOSE:
  1006. return uprobe_perf_close(tu, data);
  1007. #endif
  1008. default:
  1009. return 0;
  1010. }
  1011. return 0;
  1012. }
  1013. static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
  1014. {
  1015. struct trace_uprobe *tu;
  1016. struct uprobe_dispatch_data udd;
  1017. struct uprobe_cpu_buffer *ucb;
  1018. int dsize, esize;
  1019. int ret = 0;
  1020. tu = container_of(con, struct trace_uprobe, consumer);
  1021. tu->nhit++;
  1022. udd.tu = tu;
  1023. udd.bp_addr = instruction_pointer(regs);
  1024. current->utask->vaddr = (unsigned long) &udd;
  1025. if (WARN_ON_ONCE(!uprobe_cpu_buffer))
  1026. return 0;
  1027. dsize = __get_data_size(&tu->tp, regs);
  1028. esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
  1029. ucb = uprobe_buffer_get();
  1030. store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
  1031. if (tu->tp.flags & TP_FLAG_TRACE)
  1032. ret |= uprobe_trace_func(tu, regs, ucb, dsize);
  1033. #ifdef CONFIG_PERF_EVENTS
  1034. if (tu->tp.flags & TP_FLAG_PROFILE)
  1035. ret |= uprobe_perf_func(tu, regs, ucb, dsize);
  1036. #endif
  1037. uprobe_buffer_put(ucb);
  1038. return ret;
  1039. }
  1040. static int uretprobe_dispatcher(struct uprobe_consumer *con,
  1041. unsigned long func, struct pt_regs *regs)
  1042. {
  1043. struct trace_uprobe *tu;
  1044. struct uprobe_dispatch_data udd;
  1045. struct uprobe_cpu_buffer *ucb;
  1046. int dsize, esize;
  1047. tu = container_of(con, struct trace_uprobe, consumer);
  1048. udd.tu = tu;
  1049. udd.bp_addr = func;
  1050. current->utask->vaddr = (unsigned long) &udd;
  1051. if (WARN_ON_ONCE(!uprobe_cpu_buffer))
  1052. return 0;
  1053. dsize = __get_data_size(&tu->tp, regs);
  1054. esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
  1055. ucb = uprobe_buffer_get();
  1056. store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
  1057. if (tu->tp.flags & TP_FLAG_TRACE)
  1058. uretprobe_trace_func(tu, func, regs, ucb, dsize);
  1059. #ifdef CONFIG_PERF_EVENTS
  1060. if (tu->tp.flags & TP_FLAG_PROFILE)
  1061. uretprobe_perf_func(tu, func, regs, ucb, dsize);
  1062. #endif
  1063. uprobe_buffer_put(ucb);
  1064. return 0;
  1065. }
  1066. static struct trace_event_functions uprobe_funcs = {
  1067. .trace = print_uprobe_event
  1068. };
  1069. static inline void init_trace_event_call(struct trace_uprobe *tu,
  1070. struct trace_event_call *call)
  1071. {
  1072. INIT_LIST_HEAD(&call->class->fields);
  1073. call->event.funcs = &uprobe_funcs;
  1074. call->class->define_fields = uprobe_event_define_fields;
  1075. call->flags = TRACE_EVENT_FL_UPROBE;
  1076. call->class->reg = trace_uprobe_register;
  1077. call->data = tu;
  1078. }
  1079. static int register_uprobe_event(struct trace_uprobe *tu)
  1080. {
  1081. struct trace_event_call *call = &tu->tp.call;
  1082. int ret = 0;
  1083. init_trace_event_call(tu, call);
  1084. if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
  1085. return -ENOMEM;
  1086. ret = register_trace_event(&call->event);
  1087. if (!ret) {
  1088. kfree(call->print_fmt);
  1089. return -ENODEV;
  1090. }
  1091. ret = trace_add_event_call(call);
  1092. if (ret) {
  1093. pr_info("Failed to register uprobe event: %s\n",
  1094. trace_event_name(call));
  1095. kfree(call->print_fmt);
  1096. unregister_trace_event(&call->event);
  1097. }
  1098. return ret;
  1099. }
  1100. static int unregister_uprobe_event(struct trace_uprobe *tu)
  1101. {
  1102. int ret;
  1103. /* tu->event is unregistered in trace_remove_event_call() */
  1104. ret = trace_remove_event_call(&tu->tp.call);
  1105. if (ret)
  1106. return ret;
  1107. kfree(tu->tp.call.print_fmt);
  1108. tu->tp.call.print_fmt = NULL;
  1109. return 0;
  1110. }
  1111. #ifdef CONFIG_PERF_EVENTS
  1112. struct trace_event_call *
  1113. create_local_trace_uprobe(char *name, unsigned long offs,
  1114. unsigned long ref_ctr_offset, bool is_return)
  1115. {
  1116. struct trace_uprobe *tu;
  1117. struct path path;
  1118. int ret;
  1119. ret = kern_path(name, LOOKUP_FOLLOW, &path);
  1120. if (ret)
  1121. return ERR_PTR(ret);
  1122. if (!d_is_reg(path.dentry)) {
  1123. path_put(&path);
  1124. return ERR_PTR(-EINVAL);
  1125. }
  1126. /*
  1127. * local trace_uprobes are not added to uprobe_list, so they are never
  1128. * searched in find_probe_event(). Therefore, there is no concern of
  1129. * a duplicated name "DUMMY_EVENT" here.
  1130. */
  1131. tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
  1132. is_return);
  1133. if (IS_ERR(tu)) {
  1134. pr_info("Failed to allocate trace_uprobe.(%d)\n",
  1135. (int)PTR_ERR(tu));
  1136. path_put(&path);
  1137. return ERR_CAST(tu);
  1138. }
  1139. tu->offset = offs;
  1140. tu->path = path;
  1141. tu->ref_ctr_offset = ref_ctr_offset;
  1142. tu->filename = kstrdup(name, GFP_KERNEL);
  1143. init_trace_event_call(tu, &tu->tp.call);
  1144. if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
  1145. ret = -ENOMEM;
  1146. goto error;
  1147. }
  1148. return &tu->tp.call;
  1149. error:
  1150. free_trace_uprobe(tu);
  1151. return ERR_PTR(ret);
  1152. }
  1153. void destroy_local_trace_uprobe(struct trace_event_call *event_call)
  1154. {
  1155. struct trace_uprobe *tu;
  1156. tu = container_of(event_call, struct trace_uprobe, tp.call);
  1157. kfree(tu->tp.call.print_fmt);
  1158. tu->tp.call.print_fmt = NULL;
  1159. free_trace_uprobe(tu);
  1160. }
  1161. #endif /* CONFIG_PERF_EVENTS */
  1162. /* Make a trace interface for controlling probe points */
  1163. static __init int init_uprobe_trace(void)
  1164. {
  1165. struct dentry *d_tracer;
  1166. d_tracer = tracing_init_dentry();
  1167. if (IS_ERR(d_tracer))
  1168. return 0;
  1169. trace_create_file("uprobe_events", 0644, d_tracer,
  1170. NULL, &uprobe_events_ops);
  1171. /* Profile interface */
  1172. trace_create_file("uprobe_profile", 0444, d_tracer,
  1173. NULL, &uprobe_profile_ops);
  1174. return 0;
  1175. }
  1176. fs_initcall(init_uprobe_trace);