syscall.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>

int sysctl_unprivileged_bpf_disabled __read_mostly;
static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}
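/* Illustrative note: map->pages is filled in by the map implementation's
 * ->map_alloc() and is charged against the owning user's RLIMIT_MEMLOCK,
 * which rlimit() reports in bytes and the shift above converts to pages.
 * For example, with the common 64 KiB default limit and a 4 KiB PAGE_SIZE,
 * one user may hold at most 16 charged pages across all of their maps;
 * exceeding that makes bpf_map_charge_memlock() fail with -EPERM.
 */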
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	bpf_map_put_with_uref(filp->private_data);
	return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}
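/* Example: with CONFIG_PROC_FS enabled, reading /proc/<pid>/fdinfo/<map-fd>
 * shows the lines above appended to the generic fdinfo output, e.g.:
 *
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	256
 *
 * (map_type 1 is BPF_MAP_TYPE_HASH; the numeric values are illustrative.)
 */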
/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
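/* Worked example: for BPF_MAP_CREATE the last used field is max_entries, so
 * CHECK_ATTR(BPF_MAP_CREATE) expands to a memchr_inv() scan over every byte
 * of 'union bpf_attr' that lies past max_entries. If any of those trailing
 * bytes is non-zero, userspace set a field this kernel does not know about
 * for this command, and the syscall is rejected with -EINVAL.
 */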
#define BPF_MAP_CREATE_LAST_FIELD max_entries
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	/* the map was already charged, so drop the memlock charge before
	 * freeing it; only the charge failure path may skip the uncharge
	 */
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
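/* Usage sketch (userspace, illustrative): creating an array map through the
 * raw syscall:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success the return value is a new O_CLOEXEC file descriptor referring
 * to the map.
 */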
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map, bool uref)
{
	atomic_inc(&map->refcnt);
	if (uref)
		atomic_inc(&map->usercnt);
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
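/* Usage sketch (userspace, illustrative): looking up key 5 in the array map
 * created above. For the per-CPU map types the user buffer must instead be
 * round_up(value_size, 8) * num_possible_cpus() bytes and receives one value
 * slot per possible CPU:
 *
 *	__u32 key = 5;
 *	__u64 value;
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	if (syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)) == 0)
 *		printf("value = %llu\n", (unsigned long long)value);
 */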
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
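/* Usage sketch (userspace, illustrative): attr->flags selects the update
 * semantics: BPF_ANY creates or replaces, BPF_NOEXIST only creates, and
 * BPF_EXIST only replaces an existing element:
 *
 *	__u32 key = 5;
 *	__u64 value = 42;
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *		.flags  = BPF_ANY,
 *	};
 *	syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */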
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
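/* Usage sketch (userspace, illustrative): the common iteration idiom feeds
 * each returned key back in as the next starting point; the kernel returns
 * -ENOENT once the last key has been passed:
 *
 *	__u32 key = -1, next_key;	// start from a nonexistent key
 *	union bpf_attr attr = {
 *		.map_fd   = map_fd,
 *		.key      = (__u64)(unsigned long)&key,
 *		.next_key = (__u64)(unsigned long)&next_key,
 *	};
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)) == 0)
 *		key = next_key;		// visit next_key here
 */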
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}
/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when the program contains bpf_call
			 * instructions and has passed bpf_check(), which means
			 * ops->get_func_proto must have been supplied; check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* every function that has a prototype and that the
			 * verifier allowed programs to call must be a real
			 * in-kernel function
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
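/* Why the relative encoding works (illustrative): the interpreter resolves a
 * fixed-up call by adding insn->imm back to the same anchor, roughly:
 *
 *	BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2,
 *					       BPF_R3, BPF_R4, BPF_R5);
 *
 * so a 32-bit imm is enough to reach any helper, independent of where the
 * kernel image is loaded.
 */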
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}
static void __prog_put_common(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

/* version of bpf_prog_put() that is called after a grace period */
void bpf_prog_put_rcu(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __prog_put_common);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		__prog_put_common(&prog->aux->rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put_rcu(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *__bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* called by sockets/tracing/seccomp before attaching program to an event;
 * pairs with bpf_prog_put()
 */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = __bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;

	atomic_inc(&prog->aux->refcnt);
	fdput(f);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	err = bpf_prog_select_runtime(prog);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
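/* Usage sketch (userspace, illustrative): the smallest loadable program, a
 * socket filter that returns 0 (drop everything), written with the raw uapi
 * instruction encoding rather than any helper macros:
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0 },		// r0 = 0
 *		{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.insn_cnt  = 2,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */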
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
}
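/* Usage sketch (userspace, illustrative): pinning keeps an object alive past
 * the creating process by binding it to a name on a mounted bpf filesystem,
 * e.g. after mount -t bpf bpf /sys/fs/bpf:
 *
 *	union bpf_attr attr = {
 *		.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_map",
 *		.bpf_fd   = map_fd,
 *	};
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 * A later process can recover a new fd for the same map with BPF_OBJ_GET and
 * the same pathname (bpf_fd left zero).
 */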
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
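/* Compatibility note (illustrative): the size handling above cuts both ways.
 * Older userspace passing a smaller, truncated attr still works because the
 * local copy is zero-initialized before copy_from_user(). Newer userspace
 * passing a larger attr is accepted only if every byte past
 * sizeof(union bpf_attr) is zero: a binary built against a future uapi
 * header that leaves its new fields at zero keeps working on this kernel,
 * while one that actually sets them gets -E2BIG instead of silently
 * misbehaving.
 */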