syscall.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}

void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so the OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
			 PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
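
/*
 * Illustration (not part of the original source): for a command whose last
 * used field is 'flags', such as BPF_MAP_UPDATE_ELEM below, the macro
 * CHECK_ATTR(BPF_MAP_UPDATE_ELEM) expands roughly to
 *
 *	memchr_inv((void *) &attr->flags + sizeof(attr->flags), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, flags) -
 *		   sizeof(attr->flags)) != NULL
 *
 * i.e. it evaluates to true (and the command returns -EINVAL) if any byte
 * of the union past the command's last field is non-zero. Fields a command
 * does not use must therefore stay zero, leaving room to give them meaning
 * in later kernels.
 */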

#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
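
/*
 * Example (illustrative sketch, not part of the kernel build): creating a
 * hash map from user space via the bpf(2) command handled by map_create()
 * above. There is no libc wrapper for bpf(2) in this era, so syscall() is
 * used directly. Unused attr fields are zeroed so CHECK_ATTR() accepts the
 * request; the return value is the new map file descriptor, or -1 on error.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int create_hash_map(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));		/* unused fields must be zero */
	attr.map_type    = BPF_MAP_TYPE_HASH;
	attr.key_size    = sizeof(uint32_t);
	attr.value_size  = sizeof(uint64_t);
	attr.max_entries = 1024;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
#endif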

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
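
/*
 * Example (illustrative sketch, not part of the kernel build): looking up a
 * value through map_lookup_elem() above. User-space pointers travel in the
 * u64 'key' and 'value' attr fields, matching the u64_to_user_ptr()
 * conversions in the handler. On a missing key the syscall fails with
 * errno == ENOENT.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int lookup_elem(int map_fd, uint32_t key, uint64_t *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (uint64_t)(unsigned long)&key;
	attr.value  = (uint64_t)(unsigned long)value;

	return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
#endif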

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete; otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
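
/*
 * Example (illustrative sketch, not part of the kernel build): updating an
 * element through map_update_elem() above. attr->flags selects the update
 * semantics: BPF_ANY (create or overwrite), BPF_NOEXIST (create only) or
 * BPF_EXIST (overwrite only).
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int update_elem(int map_fd, uint32_t key, uint64_t value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (uint64_t)(unsigned long)&key;
	attr.value  = (uint64_t)(unsigned long)&value;
	attr.flags  = BPF_ANY;			/* create or overwrite */

	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
#endif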

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
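
/*
 * Example (illustrative sketch, not part of the kernel build): walking all
 * keys of a map with BPF_MAP_GET_NEXT_KEY, handled by map_get_next_key()
 * above. Iteration ends when the syscall fails (errno == ENOENT). For hash
 * maps of this era, passing a key that is not present yields the first key,
 * which is how the walk is commonly started.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static void dump_keys(int map_fd)
{
	uint32_t key = (uint32_t)-1;		/* assumed not to be a live key */
	uint32_t next_key;
	union bpf_attr attr;

	for (;;) {
		memset(&attr, 0, sizeof(attr));
		attr.map_fd   = map_fd;
		attr.key      = (uint64_t)(unsigned long)&key;
		attr.next_key = (uint64_t)(unsigned long)&next_key;

		if (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
			    sizeof(attr)) != 0)
			break;			/* ENOENT: no more keys */

		printf("key: %u\n", next_key);
		key = next_key;
	}
}
#endif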

static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *	insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *	insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program has passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when the program has bpf_call
			 * instructions and it passed bpf_check(), which means
			 * ops->get_func_proto must have been supplied; check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_xdp_adjust_head)
				prog->xdp_adjust_head = 1;
			if (insn->imm == BPF_FUNC_tail_call) {
				/* If we tail call into other programs, we
				 * cannot make any assumptions since they
				 * can be replaced dynamically during runtime
				 * in the program array.
				 */
				prog->cb_access = 1;
				prog->xdp_adjust_head = 1;

				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have a prototype and that the
			 * verifier allowed programs to call must be real
			 * in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
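
/*
 * Illustration (not part of the original source): a program calling
 * bpf_map_lookup_elem() arrives here with the instruction
 *
 *	BPF_JMP | BPF_CALL, imm = BPF_FUNC_map_lookup_elem
 *
 * and fixup_bpf_calls() rewrites imm to the offset of the in-kernel helper
 * relative to __bpf_call_base, exactly as the header comment above sketches:
 *
 *	insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 *
 * so the interpreter can compute the call target as __bpf_call_base + imm
 * without any per-helper lookup at run time.
 */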

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
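
/*
 * Example (illustrative sketch, not part of the kernel build): loading a
 * trivial socket filter through bpf_prog_load() above. The program is two
 * raw instructions, "r0 = 0; exit", which tells the socket filter to keep
 * zero bytes, i.e. drop every packet. A real loader would also set
 * log_buf/log_size/log_level to capture verifier output.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int load_drop_all_filter(void)
{
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,	/* r0 = 0 */
		  .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },		/* return r0 */
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns     = (uint64_t)(unsigned long)insns;
	attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
	attr.license   = (uint64_t)(unsigned long)"GPL";

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
#endif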

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}
#endif /* CONFIG_CGROUP_BPF */
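
/*
 * Example (illustrative sketch, not part of the kernel build): attaching a
 * previously loaded BPF_PROG_TYPE_CGROUP_SKB program to a cgroup's ingress
 * hook via bpf_prog_attach() above. target_fd is a file descriptor for the
 * cgroup directory in the cgroup v2 hierarchy.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int attach_to_cgroup(int prog_fd, const char *cgroup_path)
{
	union bpf_attr attr;
	int cgroup_fd, ret;

	cgroup_fd = open(cgroup_path, O_RDONLY | O_DIRECTORY);
	if (cgroup_fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd     = cgroup_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
	attr.attach_flags  = BPF_F_ALLOW_OVERRIDE;

	ret = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
	close(cgroup_fd);
	return ret;
}
#endif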

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
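
/*
 * Example (illustrative sketch, not part of the kernel build): a minimal
 * user-space wrapper for the syscall entry point above. The size handling
 * in SYSCALL_DEFINE3 accepts an attr union that is smaller or larger than
 * the kernel's own, as long as any trailing bytes the kernel does not know
 * about are zero, so callers should always zero the union and pass
 * sizeof(attr).
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}
#endif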