syscall.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

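/* Illustrative user-space sketch (not part of this file): map and program
 * pages are charged against RLIMIT_MEMLOCK above, so a loader commonly
 * raises that limit before calling bpf(2). A minimal sketch, assuming the
 * usual libc headers; the helper name bump_memlock_rlimit() is hypothetical:
 *
 *	#include <sys/resource.h>
 *
 *	static int bump_memlock_rlimit(void)
 *	{
 *		struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
 *
 *		// raising the hard limit requires privilege (CAP_SYS_RESOURCE)
 *		return setrlimit(RLIMIT_MEMLOCK, &r);
 *	}
 */
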
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

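/* For illustration: with CMD = BPF_MAP_CREATE and BPF_MAP_CREATE_LAST_FIELD
 * defined as map_flags below, CHECK_ATTR(BPF_MAP_CREATE) expands to roughly:
 *
 *	memchr_inv((void *) &attr->map_flags + sizeof(attr->map_flags), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_flags) -
 *		   sizeof(attr->map_flags)) != NULL
 *
 * i.e. it scans every byte of the union past the last field this command
 * understands and evaluates to true (an error) if any of them is non-zero.
 */
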
#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

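/* Illustrative user-space sketch (not part of this file): creating an array
 * map via the raw syscall. Only the fields up to map_flags may be set for
 * BPF_MAP_CREATE; everything past it must stay zero, per CHECK_ATTR above.
 * The sys_bpf() wrapper is a hypothetical helper, reused by later sketches.
 *
 *	#include <unistd.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 *
 *	int create_array_map(void)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));	// unused fields must be zero
 *		attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *		attr.key_size    = sizeof(__u32);
 *		attr.value_size  = sizeof(__u64);
 *		attr.max_entries = 256;
 *
 *		return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); // fd or -1
 *	}
 */
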
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

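/* Illustrative user-space sketch (not part of this file): updating and then
 * looking up one element of the __u32->__u64 array map from the earlier
 * sketch, reusing the hypothetical sys_bpf() helper. Note that flags sits
 * past BPF_MAP_LOOKUP_ELEM_LAST_FIELD, so it must be zero for a lookup.
 *
 *	int set_and_get(int map_fd, __u32 key, __u64 *value)
 *	{
 *		union bpf_attr attr;
 *		__u64 new_val = 42;
 *		int err;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd = map_fd;
 *		attr.key    = (__u64)(unsigned long)&key;
 *		attr.value  = (__u64)(unsigned long)&new_val;
 *		attr.flags  = BPF_ANY;	// create the element or update it
 *		err = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 *		if (err)
 *			return err;
 *
 *		attr.flags = 0;		// unused by lookup; must stay zero
 *		attr.value = (__u64)(unsigned long)value;
 *		return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *	}
 */
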
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete; otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

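/* Illustrative user-space sketch (not part of this file): walking all keys
 * of a map with __u32 keys by chaining BPF_MAP_GET_NEXT_KEY calls until the
 * syscall fails with ENOENT. Starting from a key that is not present yields
 * the first key for the common map types of this era, but that behaviour is
 * map-type dependent; sys_bpf() is the hypothetical helper from above.
 *
 *	#include <errno.h>
 *
 *	int for_each_key(int map_fd, void (*cb)(__u32 key))
 *	{
 *		union bpf_attr attr;
 *		__u32 key = (__u32)-1;	// assumed not present: yields first key
 *		__u32 next_key;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd   = map_fd;
 *		attr.key      = (__u64)(unsigned long)&key;
 *		attr.next_key = (__u64)(unsigned long)&next_key;
 *
 *		while (sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)) == 0) {
 *			cb(next_key);
 *			key = next_key;	// continue from the key just returned
 *		}
 *		return errno == ENOENT ? 0 : -1; // ENOENT marks end of map
 *	}
 */
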
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when program has bpf_call instructions
			 * and it passed bpf_check(), means that
			 * ops->get_func_proto must have been supplied, check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have prototype and verifier allowed
			 * programs to call them, must be real in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}

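/* After the fixup above the interpreter can dispatch a helper call without
 * any table lookup: imm is now a signed offset from __bpf_call_base. A
 * sketch of what the interpreter's call handling in kernel/bpf/core.c
 * conceptually does with the rewritten instruction:
 *
 *	JMP_CALL:
 *		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2,
 *						       BPF_R3, BPF_R4, BPF_R5);
 *		CONTINUE;
 */
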
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	return __bpf_prog_get(ufd, &type);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

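/* Illustrative user-space sketch (not part of this file): loading the
 * smallest valid program ("r0 = 0; exit") as a socket filter, the one
 * program type allowed without CAP_SYS_ADMIN above. sys_bpf() is the
 * hypothetical helper from the earlier sketches.
 *
 *	int load_trivial_prog(void)
 *	{
 *		struct bpf_insn insns[] = {
 *			{ .code = BPF_ALU64 | BPF_MOV | BPF_K,	// r0 = 0
 *			  .dst_reg = BPF_REG_0, .imm = 0 },
 *			{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *		};
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *		attr.insns     = (__u64)(unsigned long)insns;
 *		attr.insn_cnt  = 2;
 *		attr.license   = (__u64)(unsigned long)"GPL";
 *
 *		return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); // fd or -1
 *	}
 */
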
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

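/* Illustrative user-space sketch (not part of this file): pinning a map fd
 * in the bpf filesystem so it outlives the loading process, then reopening
 * it. Assumes bpffs is mounted at /sys/fs/bpf and the pin path is
 * hypothetical; sys_bpf() as in the earlier sketches.
 *
 *	int pin_and_reopen(int map_fd)
 *	{
 *		union bpf_attr attr;
 *		const char *path = "/sys/fs/bpf/my_map";	// hypothetical
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.bpf_fd   = map_fd;
 *		attr.pathname = (__u64)(unsigned long)path;
 *		if (sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr)))
 *			return -1;
 *
 *		memset(&attr, 0, sizeof(attr));	// bpf_fd must be 0 for GET
 *		attr.pathname = (__u64)(unsigned long)path;
 *		return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr)); // new fd
 *	}
 */
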
#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_type

static int bpf_prog_attach(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	enum bpf_prog_type ptype;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	cgroup_bpf_update(cgrp, prog, attr->attach_type);
	cgroup_put(cgrp);

	return 0;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		cgroup_bpf_update(cgrp, NULL, attr->attach_type);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
#endif /* CONFIG_CGROUP_BPF */

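/* Illustrative user-space sketch (not part of this file): attaching a
 * CGROUP_SKB program to a cgroup's ingress hook. target_fd is an fd on a
 * cgroup v2 directory; the mount path is an assumption and sys_bpf() is
 * the hypothetical helper from the earlier sketches.
 *
 *	#include <fcntl.h>
 *
 *	int attach_ingress(int prog_fd)
 *	{
 *		union bpf_attr attr;
 *		int cg_fd = open("/sys/fs/cgroup/my_group", O_RDONLY);
 *
 *		if (cg_fd < 0)
 *			return -1;
 *		memset(&attr, 0, sizeof(attr));
 *		attr.target_fd     = cg_fd;
 *		attr.attach_bpf_fd = prog_fd;
 *		attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *		return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
 *	}
 */
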
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	default:
		err = -EINVAL;
		break;
	}

	return err;
}