/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/wait.h>

struct bpf_verifier_env;
struct perf_event;
struct bpf_prog;
struct bpf_map;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
};
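
/* Example (illustrative sketch, not part of the original header): a map
 * implementation fills in a bpf_map_ops table and registers it through its
 * BPF_MAP_TYPE() entry in linux/bpf_types.h. The callback names below are
 * hypothetical and only show the wiring:
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_alloc		= example_map_alloc,
 *		.map_free		= example_map_free,
 *		.map_get_next_key	= example_map_get_next_key,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *		.map_update_elem	= example_map_update_elem,
 *		.map_delete_elem	= example_map_delete_elem,
 *	};
 *
 * map_lookup_elem() is expected to return a pointer to the element value or
 * NULL; map_update_elem()/map_delete_elem() return 0 or a negative errno.
 */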
struct bpf_map {
	/* 1st cacheline with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	u32 pages;
	u32 id;
	int numa_node;
	bool unpriv_array;
	/* 7 bytes hole */

	/* 2nd cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	struct user_struct *user ____cacheline_aligned;
	atomic_t refcnt;
	atomic_t usercnt;
	struct work_struct work;
	char name[BPF_OBJ_NAME_LEN];
};

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL,	/* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */
	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	enum bpf_arg_type arg1_type;
	enum bpf_arg_type arg2_type;
	enum bpf_arg_type arg3_type;
	enum bpf_arg_type arg4_type;
	enum bpf_arg_type arg5_type;
};
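
/* Example (illustrative sketch): the proto behind the bpf_map_lookup_elem()
 * helper, declared as bpf_map_lookup_elem_proto near the bottom of this
 * header, is defined in kernel/bpf/helpers.c roughly along these lines
 * (field values shown for illustration only):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 *
 * The verifier checks every BPF_CALL site against ret_type/argN_type; unused
 * argument slots stay ARG_DONTCARE (0).
 */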

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
};
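
/* Example (illustrative sketch): the packet pointer types above are what make
 * verified direct packet access work. In an XDP program, for instance:
 *
 *	void *data     = (void *)(long)ctx->data;	[PTR_TO_PACKET]
 *	void *data_end = (void *)(long)ctx->data_end;	[PTR_TO_PACKET_END]
 *	struct ethhdr *eth = data;
 *
 *	if (data + sizeof(*eth) > data_end)
 *		return XDP_DROP;
 *	return eth->h_proto == htons(ETH_P_IP) ? XDP_PASS : XDP_DROP;
 *
 * Only after the comparison against data_end succeeds does the verifier
 * record a non-zero 'range' for the packet pointer and allow the loads
 * through 'eth'.
 */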

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	int ctx_field_size;
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
};
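
/* Example (illustrative sketch): a hypothetical, much simplified
 * is_valid_access() callback for a context that exposes a single read-only
 * 32-bit field at offset 0 could look like:
 *
 *	static bool example_is_valid_access(int off, int size,
 *					    enum bpf_access_type type,
 *					    struct bpf_insn_access_aux *info)
 *	{
 *		if (type != BPF_READ)
 *			return false;
 *		return off == 0 && size == sizeof(__u32);
 *	}
 *
 * Callbacks for pointer-valued ctx fields additionally set info->reg_type
 * (e.g. PTR_TO_PACKET) and record the real field width with
 * bpf_ctx_record_field_size() so that narrower loads can be rewritten.
 * convert_ctx_access() then translates the validated ctx access into loads
 * and stores on the underlying kernel object (e.g. struct sk_buff).
 */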

struct bpf_prog_offload_ops {
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
};

struct bpf_dev_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	const struct bpf_prog_offload_ops *dev_ops;
};

struct bpf_prog_aux {
	atomic_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 stack_depth;
	u32 id;
	u32 func_cnt;
	bool offload_requested;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct latch_tree_node ksym_tnode;
	struct list_head ksym_lnode;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_dev_offload *offload;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	/* 'ownership' of prog_array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is stored
	 * in the map, to make sure that all callers and callees have the same
	 * prog_type and JITed flag
	 */
	enum bpf_prog_type owner_prog_type;
	bool owner_jited;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define MAX_TAIL_CALL_CNT 32

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog *progs[0];
};

struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
			     __u32 __user *prog_ids, u32 request_cnt,
			     __u32 __user *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({						\
		struct bpf_prog **_prog, *__prog;	\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_prog = _array->progs;			\
		while ((__prog = READ_ONCE(*_prog))) {	\
			_ret &= func(__prog, ctx);	\
			_prog++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		_ret;					\
	 })

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)
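
/* Example (illustrative sketch): an attach point holding an RCU-protected
 * array pointer (the 'effective' name below is hypothetical) would run it as:
 *
 *	u32 ret;
 *
 *	ret = BPF_PROG_RUN_ARRAY(effective, skb, BPF_PROG_RUN);
 *	if (ret != 1)
 *		return -EPERM;
 *
 * BPF_PROG_RUN_ARRAY() takes the RCU read lock, walks the NULL-terminated
 * progs[] array and ANDs the return values, so the result is 1 only if every
 * attached program returned 1. The _CHECK variant additionally tolerates a
 * NULL (not yet allocated) array.
 */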

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);

struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);

extern int sysctl_unprivileged_bpf_disabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
void bpf_fd_array_map_clear(struct bpf_map *map);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);

/* memcpy that is used with 8-byte aligned pointers, multiple-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
void __cpu_map_flush(struct bpf_map *map);
struct xdp_buff;
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return the map's NUMA node as specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}
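
/* Example (illustrative sketch): userspace opts into NUMA placement by setting
 * BPF_F_NUMA_NODE in map_flags and filling numa_node in the BPF_MAP_CREATE
 * attributes; otherwise the helper above returns NUMA_NO_NODE:
 *
 *	union bpf_attr attr = {
 *		.map_type	= BPF_MAP_TYPE_HASH,
 *		.key_size	= 4,
 *		.value_size	= 8,
 *		.max_entries	= 1024,
 *		.map_flags	= BPF_F_NUMA_NODE,
 *		.numa_node	= 1,
 *	};
 */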

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
							   int i)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
							u32 key)
{
	return NULL;
}

static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index)
{
}

static inline void __dev_map_flush(struct bpf_map *map)
{
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_insert_ctx(struct bpf_map *map, u32 index)
{
}

static inline void __cpu_map_flush(struct bpf_map *map)
{
}

struct xdp_buff;
static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						       enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
#else
static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline int sock_map_prog(struct bpf_map *map,
				struct bpf_prog *prog,
				u32 type)
{
	return -EOPNOTSUPP;
}
#endif

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#endif /* _LINUX_BPF_H */