syscall.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so that the OOM killer doesn't
	 * trigger under memory pressure; we really just want to fail
	 * instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
			 PAGE_KERNEL);
}
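
/* Illustrative sizing for the kmalloc-vs-vmalloc cutoff above (numbers
 * assume a common config, not taken from this file): with
 * PAGE_ALLOC_COSTLY_ORDER == 3 and 4 KB pages, requests up to
 * PAGE_SIZE << 3 = 32 KB first try the physically contiguous kmalloc()
 * path; larger (or failed) requests fall back to __vmalloc().
 */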
void bpf_map_area_free(void *area)
{
	kvfree(area);
}
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}
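
/* Worked example of the memlock accounting above (illustrative numbers,
 * not from this file): with RLIMIT_MEMLOCK = 64 MB and PAGE_SIZE = 4 KB,
 * memlock_limit = (64 << 20) >> 12 = 16384 pages. A user with 16000
 * pages already in locked_vm can still charge a 384-page map, but a
 * 385-page map pushes locked_vm over the limit, so the charge is backed
 * out and -EPERM is returned.
 */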
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif
static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_map_show_fdinfo,
#endif
	.release = bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
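
/* For example, CHECK_ATTR(BPF_MAP_CREATE), with BPF_MAP_CREATE_LAST_FIELD
 * (defined just below) being map_flags, expands to roughly:
 *
 *	memchr_inv((void *) &attr->map_flags + sizeof(attr->map_flags), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_flags) -
 *		   sizeof(attr->map_flags)) != NULL
 *
 * i.e. it scans every byte of the union past map_flags and evaluates to
 * true (command rejected with -EINVAL) if any of them is non-zero.
 */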
#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
/* If an error is returned, the fd is released.
 * On success, the caller should complete fd access with a matching fdput().
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
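
/* Worked example for the per-cpu value_size computation above
 * (illustrative numbers): a BPF_MAP_TYPE_PERCPU_ARRAY with
 * value_size = 12 on a machine where num_possible_cpus() == 4 copies
 * round_up(12, 8) * 4 = 16 * 4 = 64 bytes to user space, one 8-byte
 * aligned slot per possible CPU.
 */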
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside a bpf map update or delete; otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}
/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *	insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *	insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program has passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when the program has bpf_call
			 * instructions and it passed bpf_check(), which means
			 * ops->get_func_proto must have been supplied; check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_xdp_adjust_head)
				prog->xdp_adjust_head = 1;
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as a different opcode
				 * to avoid a conditional branch in the
				 * interpreter for every normal call,
				 * and to prevent accidental JITing by
				 * a JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have a prototype and that the
			 * verifier allowed programs to call must be real
			 * in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}
int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_prog_show_fdinfo,
#endif
	.release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}
static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}
#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
#endif /* CONFIG_CGROUP_BPF */
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
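
/* A minimal user-space sketch of driving the syscall above (illustrative
 * only, not part of the kernel source; kept under #if 0 so this file still
 * compiles). It assumes the UAPI <linux/bpf.h> header matching this kernel
 * and omits most error handling. It creates an array map, updates one
 * element, and reads it back through the commands dispatched above.
 */
#if 0
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static __u64 ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

int main(void)
{
	union bpf_attr attr;
	__u32 key = 0;
	__u64 value = 42, out = 0;
	int map_fd;

	/* BPF_MAP_CREATE: trailing attr bytes must stay zero (CHECK_ATTR) */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = sizeof(key);
	attr.value_size = sizeof(value);
	attr.max_entries = 1;
	map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (map_fd < 0)
		return 1;

	/* BPF_MAP_UPDATE_ELEM, handled by map_update_elem() above */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = ptr_to_u64(&key);
	attr.value = ptr_to_u64(&value);
	attr.flags = BPF_ANY;
	syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));

	/* BPF_MAP_LOOKUP_ELEM, handled by map_lookup_elem() above */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = ptr_to_u64(&key);
	attr.value = ptr_to_u64(&out);
	syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));

	return out == 42 ? 0 : 1;
}
#endif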