/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (the underlying map implementation's ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}
/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

#define BPF_MAP_CREATE_LAST_FIELD map_flags
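
/* For example, CHECK_ATTR(BPF_MAP_CREATE) expands (roughly) to a check that
 * every byte of the attribute union past 'map_flags' is still zero:
 *
 *	memchr_inv((void *)&attr->map_flags + sizeof(attr->map_flags), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_flags) -
 *		   sizeof(attr->map_flags)) != NULL
 *
 * so each command rejects attributes that set fields it does not understand.
 */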
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
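
/* Userspace sketch (illustration only, not part of this file): creating an
 * array map directly through the bpf(2) syscall.  create_array_map() and the
 * sizes chosen here are arbitrary examples; the memset() matters because
 * CHECK_ATTR() requires the unused attribute bytes to be zero.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int create_array_map(void)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *		attr.key_size    = sizeof(__u32);
 *		attr.value_size  = sizeof(__u64);
 *		attr.max_entries = 256;
 *
 *		// on success the return value is a new map fd
 *		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */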
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
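
/* Userspace sketch (illustration only): looking up one element from a map fd.
 * lookup_elem() is a made-up wrapper; the buffers behind 'key' and 'value'
 * must match the key_size and value_size the map was created with.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int lookup_elem(int map_fd, const void *key, void *value)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd = map_fd;
 *		attr.key    = (__u64)(unsigned long)key;
 *		attr.value  = (__u64)(unsigned long)value;
 *
 *		// returns 0 and fills *value, or -1 with errno (e.g. ENOENT)
 *		return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *	}
 */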
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete; otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
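
/* Userspace sketch (illustration only): updating an element.  The flags value
 * is one of BPF_ANY (create or overwrite), BPF_NOEXIST (create only) or
 * BPF_EXIST (overwrite only); update_elem() is a made-up wrapper name.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int update_elem(int map_fd, const void *key, const void *value,
 *			__u64 flags)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd = map_fd;
 *		attr.key    = (__u64)(unsigned long)key;
 *		attr.value  = (__u64)(unsigned long)value;
 *		attr.flags  = flags;
 *
 *		return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 *	}
 */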
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
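
/* Userspace sketch (illustration only): walking the keys of a __u32-keyed
 * hash map with BPF_MAP_GET_NEXT_KEY.  for_each_key() is a made-up wrapper;
 * 'start' is assumed to be a key that is NOT present in the map, so the
 * first call returns the first real key (hash map behaviour), and a return
 * of -1 with errno == ENOENT signals that the last key was reached.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	void for_each_key(int map_fd, __u32 start, void (*cb)(__u32 key))
 *	{
 *		union bpf_attr attr;
 *		__u32 key = start, next_key;
 *
 *		for (;;) {
 *			memset(&attr, 0, sizeof(attr));
 *			attr.map_fd   = map_fd;
 *			attr.key      = (__u64)(unsigned long)&key;
 *			attr.next_key = (__u64)(unsigned long)&next_key;
 *
 *			if (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
 *				    sizeof(attr)))
 *				break;	// ENOENT: no more keys
 *			cb(next_key);
 *			key = next_key;
 *		}
 *	}
 */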
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}
/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when program has bpf_call instructions
			 * and it passed bpf_check(), which means that
			 * ops->get_func_proto must have been supplied, check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as a different opcode
				 * to avoid a conditional branch in the
				 * interpreter for every normal call,
				 * and to prevent accidental JITing by a
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have a prototype and that the
			 * verifier allowed programs to call must be real
			 * in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}
static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	return __bpf_prog_get(ufd, &type);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
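
/* Userspace sketch (illustration only): loading a minimal socket filter that
 * just returns 0.  load_trivial_prog() is a made-up wrapper; real loaders
 * typically also set attr.log_buf/log_size/log_level to capture the verifier
 * log on failure.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int load_trivial_prog(void)
 *	{
 *		struct bpf_insn insns[] = {
 *			// r0 = 0
 *			{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *			  .dst_reg = BPF_REG_0, .imm = 0 },
 *			// return r0
 *			{ .code = BPF_JMP | BPF_EXIT },
 *		};
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *		attr.insns     = (__u64)(unsigned long)insns;
 *		attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
 *		attr.license   = (__u64)(unsigned long)"GPL";
 *
 *		// on success the return value is a new program fd
 *		return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *	}
 */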
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}
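
/* Userspace sketch (illustration only): pinning an existing map or program fd
 * to a path on a mounted bpf filesystem so it outlives the process.  pin_fd()
 * is a made-up wrapper and the path is only an example.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int pin_fd(int fd, const char *path)	// e.g. "/sys/fs/bpf/my_map"
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.bpf_fd   = fd;
 *		attr.pathname = (__u64)(unsigned long)path;
 *
 *		return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *	}
 */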
#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_type

static int bpf_prog_attach(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		prog = bpf_prog_get_type(attr->attach_bpf_fd,
					 BPF_PROG_TYPE_CGROUP_SKB);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp)) {
			bpf_prog_put(prog);
			return PTR_ERR(cgrp);
		}

		cgroup_bpf_update(cgrp, prog, attr->attach_type);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		cgroup_bpf_update(cgrp, NULL, attr->attach_type);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
#endif /* CONFIG_CGROUP_BPF */
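
/* Userspace sketch (illustration only): attaching a loaded CGROUP_SKB program
 * to a cgroup's ingress hook.  attach_ingress() is a made-up wrapper, the
 * cgroup path is only an example, and this requires a kernel built with
 * CONFIG_CGROUP_BPF and CAP_NET_ADMIN.
 *
 *	#include <fcntl.h>
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int attach_ingress(int prog_fd)
 *	{
 *		union bpf_attr attr;
 *		int ret;
 *		int cg_fd = open("/sys/fs/cgroup/my_group", O_RDONLY);
 *
 *		if (cg_fd < 0)
 *			return -1;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.target_fd     = cg_fd;
 *		attr.attach_bpf_fd = prog_fd;
 *		attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *
 *		ret = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *		close(cg_fd);
 *		return ret;
 *	}
 */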
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	default:
		err = -EINVAL;
		break;
	}

	return err;
}