syscall.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
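
/* bpf_prog_active is a per-cpu recursion guard: map update/delete paths
 * increment it so that a kprobe-attached BPF program firing on the same CPU
 * cannot re-enter map operations and deadlock (see map_update_elem()).
 * sysctl_unprivileged_bpf_disabled gates the whole bpf(2) syscall for tasks
 * lacking CAP_SYS_ADMIN.
 */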
DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
        struct bpf_map_type_list *tl;
        struct bpf_map *map;

        list_for_each_entry(tl, &bpf_map_types, list_node) {
                if (tl->type == attr->map_type) {
                        map = tl->ops->map_alloc(attr);
                        if (IS_ERR(map))
                                return map;
                        map->ops = tl->ops;
                        map->map_type = attr->map_type;
                        return map;
                }
        }
        return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
        list_add(&tl->list_node, &bpf_map_types);
}
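
/* For example, the array map registers itself roughly like this (a sketch
 * modeled on kernel/bpf/arraymap.c; details vary by kernel version):
 *
 *      static struct bpf_map_type_list array_type = {
 *              .ops    = &array_ops,
 *              .type   = BPF_MAP_TYPE_ARRAY,
 *      };
 *
 *      static int __init register_array_map(void)
 *      {
 *              bpf_register_map_type(&array_type);
 *              return 0;
 *      }
 *      late_initcall(register_array_map);
 */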

int bpf_map_precharge_memlock(u32 pages)
{
        struct user_struct *user = get_current_user();
        unsigned long memlock_limit, cur;

        memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        cur = atomic_long_read(&user->locked_vm);
        free_uid(user);
        if (cur + pages > memlock_limit)
                return -EPERM;
        return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
        struct user_struct *user = get_current_user();
        unsigned long memlock_limit;

        memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

        atomic_long_add(map->pages, &user->locked_vm);

        if (atomic_long_read(&user->locked_vm) > memlock_limit) {
                atomic_long_sub(map->pages, &user->locked_vm);
                free_uid(user);
                return -EPERM;
        }
        map->user = user;
        return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
        struct user_struct *user = map->user;

        atomic_long_sub(map->pages, &user->locked_vm);
        free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
        struct bpf_map *map = container_of(work, struct bpf_map, work);

        bpf_map_uncharge_memlock(map);
        /* implementation dependent freeing */
        map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
        if (atomic_dec_and_test(&map->usercnt)) {
                if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
                        bpf_fd_array_map_clear(map);
        }
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (the underlying map implementation's ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
        if (atomic_dec_and_test(&map->refcnt)) {
                INIT_WORK(&map->work, bpf_map_free_deferred);
                schedule_work(&map->work);
        }
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
        bpf_map_put_uref(map);
        bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
        struct bpf_map *map = filp->private_data;

        if (map->ops->map_release)
                map->ops->map_release(map, filp);

        bpf_map_put_with_uref(map);
        return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
        const struct bpf_map *map = filp->private_data;
        const struct bpf_array *array;
        u32 owner_prog_type = 0;

        if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
                array = container_of(map, struct bpf_array, map);
                owner_prog_type = array->owner_prog_type;
        }

        seq_printf(m,
                   "map_type:\t%u\n"
                   "key_size:\t%u\n"
                   "value_size:\t%u\n"
                   "max_entries:\t%u\n"
                   "map_flags:\t%#x\n"
                   "memlock:\t%llu\n",
                   map->map_type,
                   map->key_size,
                   map->value_size,
                   map->max_entries,
                   map->map_flags,
                   map->pages * 1ULL << PAGE_SHIFT);

        if (owner_prog_type)
                seq_printf(m, "owner_prog_type:\t%u\n",
                           owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo = bpf_map_show_fdinfo,
#endif
        .release = bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
        return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
                                O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
        memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
                   sizeof(attr->CMD##_LAST_FIELD), 0, \
                   sizeof(*attr) - \
                   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
                   sizeof(attr->CMD##_LAST_FIELD)) != NULL
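
/* For example, CHECK_ATTR(BPF_MAP_CREATE) scans every byte of 'attr' past
 * map_flags (that command's last used field) with memchr_inv(): if any of
 * them is non-zero, user space set fields this command does not understand,
 * and the request is rejected with -EINVAL. This is what lets new fields be
 * appended to the union for new commands without breaking older ones.
 */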

#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
        struct bpf_map *map;
        int err;

        err = CHECK_ATTR(BPF_MAP_CREATE);
        if (err)
                return -EINVAL;

        /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
        map = find_and_alloc_map(attr);
        if (IS_ERR(map))
                return PTR_ERR(map);

        atomic_set(&map->refcnt, 1);
        atomic_set(&map->usercnt, 1);

        err = bpf_map_charge_memlock(map);
        if (err)
                goto free_map_nouncharge;

        err = bpf_map_new_fd(map);
        if (err < 0)
                /* failed to allocate fd */
                goto free_map;

        trace_bpf_map_create(map, err);
        return err;

free_map:
        bpf_map_uncharge_memlock(map);
free_map_nouncharge:
        map->ops->map_free(map);
        return err;
}
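
/* User space reaches map_create() through the bpf(2) syscall; a minimal,
 * illustrative sketch (error handling omitted):
 *
 *      union bpf_attr attr = {
 *              .map_type    = BPF_MAP_TYPE_HASH,
 *              .key_size    = 4,
 *              .value_size  = 8,
 *              .max_entries = 1024,
 *      };
 *      int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */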

/* If an error is returned, the fd is released.
 * On success, the caller should complete fd access with a matching fdput().
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
        if (!f.file)
                return ERR_PTR(-EBADF);
        if (f.file->f_op != &bpf_map_fops) {
                fdput(f);
                return ERR_PTR(-EINVAL);
        }

        return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
        if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
                atomic_dec(&map->refcnt);
                return ERR_PTR(-EBUSY);
        }
        if (uref)
                atomic_inc(&map->usercnt);
        return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
        struct fd f = fdget(ufd);
        struct bpf_map *map;

        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return map;

        map = bpf_map_inc(map, true);
        fdput(f);

        return map;
}
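
/* Weak stub: when stack-trace maps are built in, the real implementation in
 * kernel/bpf/stackmap.c overrides this; otherwise lookups on
 * BPF_MAP_TYPE_STACK_TRACE maps fail with -ENOTSUPP.
 */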
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
        return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *uvalue = u64_to_user_ptr(attr->value);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value, *ptr;
        u32 value_size;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = -ENOMEM;
        key = kmalloc(map->key_size, GFP_USER);
        if (!key)
                goto err_put;

        err = -EFAULT;
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;
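
        /* Per-cpu maps return one value per possible CPU to user space,
         * each slot padded to 8 bytes, so the user buffer must hold
         * round_up(value_size, 8) * num_possible_cpus() bytes.
         */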
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                value_size = round_up(map->value_size, 8) * num_possible_cpus();
        else
                value_size = map->value_size;

        err = -ENOMEM;
        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;

        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                err = bpf_percpu_hash_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
                err = bpf_stackmap_copy(map, key, value);
        } else {
                rcu_read_lock();
                ptr = map->ops->map_lookup_elem(map, key);
                if (ptr)
                        memcpy(value, ptr, value_size);
                rcu_read_unlock();
                err = ptr ? 0 : -ENOENT;
        }

        if (err)
                goto free_value;

        err = -EFAULT;
        if (copy_to_user(uvalue, value, value_size) != 0)
                goto free_value;

        trace_bpf_map_lookup_elem(map, ufd, key, value);
        err = 0;

free_value:
        kfree(value);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *uvalue = u64_to_user_ptr(attr->value);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
        u32 value_size;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = -ENOMEM;
        key = kmalloc(map->key_size, GFP_USER);
        if (!key)
                goto err_put;

        err = -EFAULT;
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;

        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                value_size = round_up(map->value_size, 8) * num_possible_cpus();
        else
                value_size = map->value_size;

        err = -ENOMEM;
        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;

        err = -EFAULT;
        if (copy_from_user(value, uvalue, value_size) != 0)
                goto free_value;

        /* must increment bpf_prog_active to avoid kprobe+bpf triggering from
         * inside bpf map update or delete, otherwise deadlocks are possible
         */
        preempt_disable();
        __this_cpu_inc(bpf_prog_active);
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                err = bpf_percpu_hash_update(map, key, value, attr->flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_update(map, key, value, attr->flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
                   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
                   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
                rcu_read_lock();
                err = bpf_fd_array_map_update_elem(map, f.file, key, value,
                                                   attr->flags);
                rcu_read_unlock();
        } else {
                rcu_read_lock();
                err = map->ops->map_update_elem(map, key, value, attr->flags);
                rcu_read_unlock();
        }
        __this_cpu_dec(bpf_prog_active);
        preempt_enable();

        if (!err)
                trace_bpf_map_update_elem(map, ufd, key, value);

free_value:
        kfree(value);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        struct fd f;
        void *key;
        int err;

        if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = -ENOMEM;
        key = kmalloc(map->key_size, GFP_USER);
        if (!key)
                goto err_put;

        err = -EFAULT;
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;
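
        /* same recursion guard as in map_update_elem(): keep a kprobe
         * BPF program on this CPU from re-entering map operations
         */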
        preempt_disable();
        __this_cpu_inc(bpf_prog_active);
        rcu_read_lock();
        err = map->ops->map_delete_elem(map, key);
        rcu_read_unlock();
        __this_cpu_dec(bpf_prog_active);
        preempt_enable();

        if (!err)
                trace_bpf_map_delete_elem(map, ufd, key);

free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *unext_key = u64_to_user_ptr(attr->next_key);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *next_key;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = -ENOMEM;
        key = kmalloc(map->key_size, GFP_USER);
        if (!key)
                goto err_put;

        err = -EFAULT;
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;

        err = -ENOMEM;
        next_key = kmalloc(map->key_size, GFP_USER);
        if (!next_key)
                goto free_key;

        rcu_read_lock();
        err = map->ops->map_get_next_key(map, key, next_key);
        rcu_read_unlock();
        if (err)
                goto free_next_key;

        err = -EFAULT;
        if (copy_to_user(unext_key, next_key, map->key_size) != 0)
                goto free_next_key;

        trace_bpf_map_next_key(map, ufd, key, next_key);
        err = 0;

free_next_key:
        kfree(next_key);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}
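
/* map_get_next_key() is the building block for iterating a map from user
 * space: point attr.key at a seed key (for hash maps, one not present in
 * the map yields the first key), point attr.next_key at a second buffer,
 * then loop until the call fails with -ENOENT. An illustrative sketch with
 * hypothetical buffer names:
 *
 *      while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
 *                     sizeof(attr)) == 0) {
 *              // consume next_key_buf, e.g. via BPF_MAP_LOOKUP_ELEM
 *              memcpy(key_buf, next_key_buf, key_size);
 *      }
 */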

static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
        struct bpf_prog_type_list *tl;

        list_for_each_entry(tl, &bpf_prog_types, list_node) {
                if (tl->type == type) {
                        prog->aux->ops = tl->ops;
                        prog->type = type;
                        return 0;
                }
        }

        return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
        list_add(&tl->list_node, &bpf_prog_types);
}

/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program has passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
        const struct bpf_func_proto *fn;
        int i;

        for (i = 0; i < prog->len; i++) {
                struct bpf_insn *insn = &prog->insnsi[i];

                if (insn->code == (BPF_JMP | BPF_CALL)) {
                        /* we only reach here when the program has bpf_call
                         * instructions and has passed bpf_check(), which
                         * means ops->get_func_proto must have been
                         * supplied; check it
                         */
                        BUG_ON(!prog->aux->ops->get_func_proto);

                        if (insn->imm == BPF_FUNC_get_route_realm)
                                prog->dst_needed = 1;
                        if (insn->imm == BPF_FUNC_get_prandom_u32)
                                bpf_user_rnd_init_once();
                        if (insn->imm == BPF_FUNC_xdp_adjust_head)
                                prog->xdp_adjust_head = 1;
                        if (insn->imm == BPF_FUNC_tail_call) {
                                /* mark bpf_tail_call as a different opcode
                                 * to avoid a conditional branch in the
                                 * interpreter for every normal call,
                                 * and to prevent accidental JITing by
                                 * a JIT compiler that doesn't support
                                 * bpf_tail_call yet
                                 */
                                insn->imm = 0;
                                insn->code |= BPF_X;
                                continue;
                        }

                        fn = prog->aux->ops->get_func_proto(insn->imm);
                        /* all functions that have a prototype and that the
                         * verifier allowed programs to call must be real
                         * in-kernel functions
                         */
                        BUG_ON(!fn->func);
                        insn->imm = fn->func - __bpf_call_base;
                }
        }
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
        int i;

        for (i = 0; i < aux->used_map_cnt; i++)
                bpf_map_put(aux->used_maps[i]);

        kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
        unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        unsigned long user_bufs;

        if (user) {
                user_bufs = atomic_long_add_return(pages, &user->locked_vm);
                if (user_bufs > memlock_limit) {
                        atomic_long_sub(pages, &user->locked_vm);
                        return -EPERM;
                }
        }

        return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
        if (user)
                atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
        struct user_struct *user = get_current_user();
        int ret;

        ret = __bpf_prog_charge(user, prog->pages);
        if (ret) {
                free_uid(user);
                return ret;
        }

        prog->aux->user = user;
        return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
        struct user_struct *user = prog->aux->user;

        __bpf_prog_uncharge(user, prog->pages);
        free_uid(user);
}
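
/* Program teardown is RCU-deferred: the last reference can be dropped while
 * another CPU is still executing the program under rcu_read_lock(), so the
 * actual freeing runs from call_rcu() once all readers are done.
 */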
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
        struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

        free_used_maps(aux);
        bpf_prog_uncharge_memlock(aux->prog);
        bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
                trace_bpf_prog_put_rcu(prog);
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
        }
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
        struct bpf_prog *prog = filp->private_data;

        bpf_prog_put(prog);
        return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
        const struct bpf_prog *prog = filp->private_data;
        char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

        bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
        seq_printf(m,
                   "prog_type:\t%u\n"
                   "prog_jited:\t%u\n"
                   "prog_tag:\t%s\n"
                   "memlock:\t%llu\n",
                   prog->type,
                   prog->jited,
                   prog_tag,
                   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo = bpf_prog_show_fdinfo,
#endif
        .release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
        return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
                                O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
        if (!f.file)
                return ERR_PTR(-EBADF);
        if (f.file->f_op != &bpf_prog_fops) {
                fdput(f);
                return ERR_PTR(-EINVAL);
        }

        return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
        if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
                atomic_sub(i, &prog->aux->refcnt);
                return ERR_PTR(-EBUSY);
        }
        return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
        /* Only to be used for undoing previous bpf_prog_add() in some
         * error path. We still know that another entity in our call
         * path holds a reference to the program, thus atomic_sub() can
         * be safely used in such cases!
         */
        WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
        return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
        struct fd f = fdget(ufd);
        struct bpf_prog *prog;

        prog = ____bpf_prog_get(f);
        if (IS_ERR(prog))
                return prog;
        if (type && prog->type != *type) {
                prog = ERR_PTR(-EINVAL);
                goto out;
        }

        prog = bpf_prog_inc(prog);
out:
        fdput(f);
        return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
        return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
        struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

        if (!IS_ERR(prog))
                trace_bpf_prog_get_type(prog);
        return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
        enum bpf_prog_type type = attr->prog_type;
        struct bpf_prog *prog;
        int err;
        char license[128];
        bool is_gpl;

        if (CHECK_ATTR(BPF_PROG_LOAD))
                return -EINVAL;

        /* copy eBPF program license from user space */
        if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
                              sizeof(license) - 1) < 0)
                return -EFAULT;
        license[sizeof(license) - 1] = 0;

        /* eBPF programs must be GPL compatible to use GPL-ed functions */
        is_gpl = license_is_gpl_compatible(license);

        if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
                return -E2BIG;

        if (type == BPF_PROG_TYPE_KPROBE &&
            attr->kern_version != LINUX_VERSION_CODE)
                return -EINVAL;

        if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        /* plain bpf_prog allocation */
        prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
        if (!prog)
                return -ENOMEM;

        err = bpf_prog_charge_memlock(prog);
        if (err)
                goto free_prog_nouncharge;

        prog->len = attr->insn_cnt;

        err = -EFAULT;
        if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
                           bpf_prog_insn_size(prog)) != 0)
                goto free_prog;

        prog->orig_prog = NULL;
        prog->jited = 0;

        atomic_set(&prog->aux->refcnt, 1);
        prog->gpl_compatible = is_gpl ? 1 : 0;

        /* find program type: socket_filter vs tracing_filter */
        err = find_prog_type(type, prog);
        if (err < 0)
                goto free_prog;

        /* run eBPF verifier */
        err = bpf_check(&prog, attr);
        if (err < 0)
                goto free_used_maps;

        /* fixup BPF_CALL->imm field */
        fixup_bpf_calls(prog);

        /* eBPF program is ready to be JITed */
        prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;

        err = bpf_prog_new_fd(prog);
        if (err < 0)
                /* failed to allocate fd */
                goto free_used_maps;

        trace_bpf_prog_load(prog, err);
        return err;

free_used_maps:
        free_used_maps(prog->aux);
free_prog:
        bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
        bpf_prog_free(prog);
        return err;
}
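
/* A minimal, illustrative user-space counterpart (insns would hold real BPF
 * instructions; error handling omitted):
 *
 *      union bpf_attr attr = {
 *              .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *              .insns     = (__u64)(unsigned long)insns,
 *              .insn_cnt  = insn_cnt,
 *              .license   = (__u64)(unsigned long)"GPL",
 *      };
 *      int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */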

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
        if (CHECK_ATTR(BPF_OBJ))
                return -EINVAL;

        return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
        if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
                return -EINVAL;

        return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}
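
/* Pinning gives a map or program a name in the bpf filesystem so that it can
 * outlive the process that created it: BPF_OBJ_PIN links the object behind
 * bpf_fd at a user-supplied path (conventionally under /sys/fs/bpf), and
 * BPF_OBJ_GET later turns that path back into a fresh fd.
 */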

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_type

static int bpf_prog_attach(const union bpf_attr *attr)
{
        struct bpf_prog *prog;
        struct cgroup *cgrp;
        enum bpf_prog_type ptype;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (CHECK_ATTR(BPF_PROG_ATTACH))
                return -EINVAL;

        switch (attr->attach_type) {
        case BPF_CGROUP_INET_INGRESS:
        case BPF_CGROUP_INET_EGRESS:
                ptype = BPF_PROG_TYPE_CGROUP_SKB;
                break;
        case BPF_CGROUP_INET_SOCK_CREATE:
                ptype = BPF_PROG_TYPE_CGROUP_SOCK;
                break;
        default:
                return -EINVAL;
        }

        prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
        if (IS_ERR(prog))
                return PTR_ERR(prog);

        cgrp = cgroup_get_from_fd(attr->target_fd);
        if (IS_ERR(cgrp)) {
                bpf_prog_put(prog);
                return PTR_ERR(cgrp);
        }

        cgroup_bpf_update(cgrp, prog, attr->attach_type);
        cgroup_put(cgrp);

        return 0;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
        struct cgroup *cgrp;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (CHECK_ATTR(BPF_PROG_DETACH))
                return -EINVAL;

        switch (attr->attach_type) {
        case BPF_CGROUP_INET_INGRESS:
        case BPF_CGROUP_INET_EGRESS:
        case BPF_CGROUP_INET_SOCK_CREATE:
                cgrp = cgroup_get_from_fd(attr->target_fd);
                if (IS_ERR(cgrp))
                        return PTR_ERR(cgrp);

                cgroup_bpf_update(cgrp, NULL, attr->attach_type);
                cgroup_put(cgrp);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
#endif /* CONFIG_CGROUP_BPF */

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
        union bpf_attr attr = {};
        int err;

        if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
                return -EPERM;

        if (!access_ok(VERIFY_READ, uattr, 1))
                return -EFAULT;

        if (size > PAGE_SIZE)   /* silly large */
                return -E2BIG;

        /* If we're handed a bigger struct than we know of,
         * ensure all the unknown bits are 0 - i.e. new
         * user-space does not rely on any kernel feature
         * extensions we don't know about yet.
         */
        if (size > sizeof(attr)) {
                unsigned char __user *addr;
                unsigned char __user *end;
                unsigned char val;

                addr = (void __user *)uattr + sizeof(attr);
                end  = (void __user *)uattr + size;

                for (; addr < end; addr++) {
                        err = get_user(val, addr);
                        if (err)
                                return err;
                        if (val)
                                return -E2BIG;
                }
                size = sizeof(attr);
        }

        /* copy attributes from user space, may be less than sizeof(bpf_attr) */
        if (copy_from_user(&attr, uattr, size) != 0)
                return -EFAULT;

        switch (cmd) {
        case BPF_MAP_CREATE:
                err = map_create(&attr);
                break;
        case BPF_MAP_LOOKUP_ELEM:
                err = map_lookup_elem(&attr);
                break;
        case BPF_MAP_UPDATE_ELEM:
                err = map_update_elem(&attr);
                break;
        case BPF_MAP_DELETE_ELEM:
                err = map_delete_elem(&attr);
                break;
        case BPF_MAP_GET_NEXT_KEY:
                err = map_get_next_key(&attr);
                break;
        case BPF_PROG_LOAD:
                err = bpf_prog_load(&attr);
                break;
        case BPF_OBJ_PIN:
                err = bpf_obj_pin(&attr);
                break;
        case BPF_OBJ_GET:
                err = bpf_obj_get(&attr);
                break;
#ifdef CONFIG_CGROUP_BPF
        case BPF_PROG_ATTACH:
                err = bpf_prog_attach(&attr);
                break;
        case BPF_PROG_DETACH:
                err = bpf_prog_detach(&attr);
                break;
#endif
        default:
                err = -EINVAL;
                break;
        }

        return err;
}