syscall.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
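
/* A map implementation registers itself at boot via an initcall. A
 * minimal sketch of a registrant (loosely modeled on the array map;
 * the names below are illustrative, not a verbatim copy of arraymap.c):
 *
 *	static struct bpf_map_type_list example_type __ro_after_init = {
 *		.ops	= &example_map_ops,
 *		.type	= BPF_MAP_TYPE_ARRAY,
 *	};
 *
 *	static int __init register_example_map(void)
 *	{
 *		bpf_register_map_type(&example_type);
 *		return 0;
 *	}
 *	late_initcall(register_example_map);
 */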

void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
			 PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}
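
/* Allocation strategy: try kmalloc() first for requests up to the
 * "costly" page-order threshold, where physically contiguous memory is
 * still cheap to get, and fall back to __vmalloc() for anything larger
 * or when kmalloc() fails. kvfree() in bpf_map_area_free() dispatches
 * on the address, so both allocation paths share one free routine.
 */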

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}
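
/* bpf_map_precharge_memlock() only checks the RLIMIT_MEMLOCK limit
 * without charging anything; map implementations can call it before
 * committing to a large allocation so they fail early with -EPERM.
 * The real charge is taken in bpf_map_charge_memlock() below, which
 * also pins the user_struct for the lifetime of the map.
 */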

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
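
/* For example, with BPF_MAP_DELETE_ELEM_LAST_FIELD defined as 'key',
 * CHECK_ATTR(BPF_MAP_DELETE_ELEM) expands to a memchr_inv() over every
 * byte of the union that follows attr->key, and evaluates to true
 * (-EINVAL in the callers) if any of those trailing bytes is non-zero.
 * That is what lets the kernel reject attributes that a newer user
 * space filled in for fields this command does not take.
 */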

#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
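
/* From user space, map creation goes through the bpf(2) multiplexer.
 * A minimal sketch (assuming <linux/bpf.h> and a raw syscall(2), since
 * glibc provides no wrapper; error handling elided):
 *
 *	union bpf_attr attr = {
 *		.map_type	= BPF_MAP_TYPE_HASH,
 *		.key_size	= sizeof(int),
 *		.value_size	= sizeof(long),
 *		.max_entries	= 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success the return value is the new map fd, opened O_CLOEXEC by
 * bpf_map_new_fd() above.
 */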

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}
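
/* Capping refcnt at BPF_MAX_REFCNT keeps a hostile user from
 * overflowing the 32-bit atomic (e.g. by duplicating an fd in a loop),
 * which would otherwise wrap the count and free an object that is
 * still referenced. Callers see -EBUSY once the cap is hit.
 */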

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		   map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = -ENOTSUPP;
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
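
/* For the per-CPU map types, a lookup returns one value slot per
 * possible CPU, each rounded up to 8 bytes. User space has to size its
 * buffer the same way the kernel does above; a sketch (the layout is
 * inferred from the value_size computation above, and the CPU count
 * should come from /sys/devices/system/cpu/possible):
 *
 *	unsigned int ncpus = ...;	// number of possible CPUs
 *	size_t slot = (value_size + 7) & ~7UL;
 *	void *buf = malloc(slot * ncpus);
 *	// after BPF_MAP_LOOKUP_ELEM, CPU n's copy is at buf + n * slot
 */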

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
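
/* The bpf_prog_active dance above is the syscall side of a recursion
 * guard: tracing programs (e.g. ones attached via kprobes) check this
 * per-CPU counter before running and skip execution while it is
 * non-zero. Without it, a kprobe placed on a map-internals function
 * could fire a BPF program that touches the same map the syscall is
 * mid-way through updating, and deadlock on the map's internal lock.
 */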

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
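
/* BPF_MAP_GET_NEXT_KEY is the building block for iterating a map from
 * user space: seed the loop with a key that is not in the map (the hash
 * map then hands back its first key) and feed each returned key back in
 * until the op returns -ENOENT. A sketch, assuming a hypothetical
 * bpf_next_key() helper that wraps the syscall:
 *
 *	char key[KEY_SIZE] = {};	// hopefully-absent seed key
 *	char next[KEY_SIZE];
 *	while (bpf_next_key(map_fd, key, next) == 0) {
 *		// ... process next ...
 *		memcpy(key, next, sizeof(key));
 *	}
 */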

static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);
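
/* Freeing is deferred through call_rcu() because a program whose last
 * reference just dropped may still be executing on another CPU under
 * rcu_read_lock(), which is how the run-time side keeps programs live.
 * Only after a grace period is it safe to drop its maps and free the
 * image itself in __bpf_prog_put_rcu().
 */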

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
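
/* A minimal load from user space mirrors the attr fields consumed
 * above. Sketch only; a real loader would also set log_buf, log_size
 * and log_level to capture verifier output (error handling elided):
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },	// r0 = 0
 *		{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns	   = (__u64)(unsigned long)insns,
 *		.insn_cnt  = 2,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */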

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
#endif /* CONFIG_CGROUP_BPF */
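
/* Attaching installs the program for that attach_type on the cgroup,
 * replacing any previous one; detaching passes prog == NULL through the
 * same cgroup_bpf_update() path. BPF_F_ALLOW_OVERRIDE records that a
 * descendant cgroup may later install its own program in place of this
 * one; without it, attach attempts in descendants are rejected.
 */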

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}
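
	/* Example of the compatibility rule above: a user space built
	 * against a newer UAPI may pass size == sizeof(attr) + 8; the
	 * call still succeeds as long as those 8 trailing bytes are all
	 * zero, and is treated exactly like a sizeof(attr)-sized call.
	 * Conversely, an older user space passing a smaller size works
	 * because attr starts out zeroed and is only partially filled.
	 */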

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}