syscall.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);
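
/* find the registered map implementation matching attr->map_type and let it
 * allocate the map; unknown map types are rejected with -EINVAL
 */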
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
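
/* check up front whether charging 'pages' more locked pages would push the
 * current user over RLIMIT_MEMLOCK, without actually charging them
 */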
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}
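
/* charge map->pages against the current user's RLIMIT_MEMLOCK; on success
 * the user reference is stashed in map->user for the later uncharge
 */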
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}
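
/* drop a user-space reference; once the last one is gone, clear prog_array
 * maps so the programs they hold can be released
 */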
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}
void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_map_show_fdinfo,
#endif
	.release = bpf_map_release,
};
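
/* install a new anonymous-inode file descriptor that owns one reference on
 * the map; the fd is opened read-write and close-on-exec
 */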
int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768
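
/* take another reference on the map, refusing with -EBUSY once the count
 * would exceed BPF_MAX_REFCNT; with uref set, the user-visible count is
 * bumped as well
 */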
struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}
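
/* called via syscall: copy the key in from user space, look the element up
 * under RCU (or via the per-cpu/stack-trace helpers) and copy the value back
 * out to user space
 */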
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
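
/* called via syscall: copy key and value in from user space and insert or
 * update the element according to attr->flags
 */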
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete; otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
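
/* called via syscall: copy the key in from user space and remove the
 * matching element from the map
 */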
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
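
/* called via syscall: return the key that follows *key, which lets user
 * space iterate over all elements of the map
 */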
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static LIST_HEAD(bpf_prog_types);
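
/* find the registered program type matching 'type' and hook its verifier and
 * helper ops into prog->aux; unknown program types get -EINVAL
 */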
static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}
/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when the program has bpf_call
			 * instructions and it passed bpf_check(), which means
			 * ops->get_func_proto must have been supplied, so
			 * check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as a different opcode
				 * to avoid a conditional branch in the
				 * interpreter for every normal call
				 * and to prevent accidental JITing by a
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have a prototype and that the
			 * verifier allowed programs to call must be real
			 * in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}
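
/* charge prog->pages against the current user's RLIMIT_MEMLOCK; the user
 * reference is kept in prog->aux->user for the later uncharge
 */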
static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}
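
/* RCU callback that runs once the last reference is gone: release the maps
 * the program used, uncharge its memlock pages and free the program
 */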
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};
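
/* install an anonymous-inode fd that owns one reference on the program */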
int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *__bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
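
/* take another reference on the program, refusing with -EBUSY once the
 * count would exceed BPF_MAX_REFCNT
 */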
struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}

/* called by sockets/tracing/seccomp before attaching program to an event
 * pairs with bpf_prog_put()
 */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = __bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;

	prog = bpf_prog_inc(prog);
	fdput(f);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get);
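
/* called via syscall: copy the instructions and license in from user space,
 * run the verifier, fix up helper calls, select the runtime (interpreter or
 * JIT) and return a new fd for the loaded program
 */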
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
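
/* BPF_OBJ_PIN and BPF_OBJ_GET pin a map or program fd to a path and re-open
 * a pinned object from that path, so the object can outlive the creating
 * process
 */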
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
}
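
/* bpf(2) entry point: enforce the privilege policy, copy in and sanity
 * check 'union bpf_attr' (tolerating larger, zero-padded structs from newer
 * user space) and dispatch on cmd
 */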
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}