bpf_trace.c

/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/error-injection.h>

#include "trace_probe.h"
#include "trace.h"

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out on events when it was updated in between this and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
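
/*
 * Illustrative sketch (not part of the original file): a caller such as the
 * kprobe perf handler in the tracing code is expected to consume the return
 * value roughly as
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *		return;
 *
 * i.e. a zero return filters the event out, anything else lets the kprobe
 * event be stored in the ring buffer.
 */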

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */
	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
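/*
 * For reference: mod[i] records the length modifier seen for argument i in
 * the loop above. It is 0 for plain int specifiers, 1 for a single 'l' (or
 * for a '%p'/'%s' whose argument has been replaced by the kernel buffer
 * pointer), and 2 for 'll'; the macros below pick the matching cast.
 */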
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(0 /* Fake ip */,					\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
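
/*
 * Illustrative sketch of the BPF-program-side call (names are examples only):
 *
 *	char fmt[] = "pid %d comm %s\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, comm);
 *
 * At most three arguments and one '%s' per format string are accepted, and
 * the output is typically read from /sys/kernel/debug/tracing/trace_pipe.
 */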

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
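
/*
 * Because the raw counter value and a negative errno share the same return
 * register above, counter values in that small negative range are ambiguous;
 * bpf_perf_event_read_value() below returns the error code separately from
 * the counter and avoids the ambiguity.
 */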

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_event_output(event, sd, regs);
	return 0;
}

BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	return __bpf_perf_event_output(regs, map, flags, sd);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
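
/*
 * Illustrative sketch of the BPF-program-side call, assuming a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map named "events" and a sample struct
 * "data" (both names are examples only):
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 *
 * The referenced perf event must be a PERF_COUNT_SW_BPF_OUTPUT software
 * event opened on the current CPU, as enforced in __bpf_perf_event_output().
 */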

static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	return __bpf_perf_event_output(regs, map, flags, sd);
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(in_interrupt()))
		return -EINVAL;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func		= bpf_probe_read_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
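
/*
 * Illustrative sketch (example names only): the returned length lets a BPF
 * program emit only the bytes that were actually copied, e.g.
 *
 *	int len = bpf_probe_read_str(buf, sizeof(buf), unsafe_str);
 *	if (len > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      buf, len);
 */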

static const struct bpf_func_proto *
tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack
 */
static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

	perf_fetch_caller_regs(regs);
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

	perf_fetch_caller_regs(regs);
	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	/* largest tracepoint in the kernel has 12 args */
	if (off < 0 || off >= sizeof(__u64) * 12)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	return true;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
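
/*
 * The conversion above rewrites each bpf_perf_event_data access into two
 * loads: the first fetches the corresponding kernel-side pointer (data or
 * regs) from struct bpf_perf_event_data_kern, the second reads the requested
 * field through it, so BPF programs never dereference the kernel structures
 * directly.
 */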

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array __rcu *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if they are on the function entry,
	 * and only if they are on the opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = event->tp_event->prog_array;
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array __rcu *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = event->tp_event->prog_array;
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
				       ids,
				       ids_len,
				       &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}
	return NULL;
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	rcu_read_lock();
	preempt_disable();
	(void) BPF_PROG_RUN(prog, args);
	preempt_enable();
	rcu_read_unlock();
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
									\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
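
/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands (roughly) to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * One runner is emitted per arity so raw tracepoints with up to twelve
 * arguments can hand them to a BPF program as a plain u64 array.
 */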

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	int err;

	mutex_lock(&bpf_event_mutex);
	err = __bpf_probe_register(btp, prog);
	mutex_unlock(&bpf_event_mutex);
	return err;
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	int err;

	mutex_lock(&bpf_event_mutex);
	err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
	mutex_unlock(&bpf_event_mutex);
	return err;
}