syscall.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}

void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
			 PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

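/* Charge map->pages against the current user's RLIMIT_MEMLOCK and remember
 * the user in map->user so bpf_map_uncharge_memlock() can undo it later.
 */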
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

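/* Drop a user reference; once the last user reference is gone, a prog array
 * is cleared so the programs stored in it are released.
 */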
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

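/* ->release() handler for map fds: let the map implementation clean up any
 * per-file state, then drop the user and base references held by the fd.
 */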
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

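/* Allocate a new anon-inode fd referring to @map; closing the fd ends up in
 * bpf_map_release() above.
 */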
int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

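/* Take an additional reference on @map (and optionally a user reference),
 * failing with -EBUSY once BPF_MAX_REFCNT would be exceeded.
 */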
struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

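/* Look up the map behind fd @ufd and take both a refcnt and a usercnt
 * reference on it; the temporary fd reference is dropped before returning.
 */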
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

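/* BPF_MAP_LOOKUP_ELEM: copy the key from user space, fetch the value via the
 * map-type specific lookup path and copy the result back to user space.
 */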
static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		   map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = -ENOTSUPP;
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

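/* BPF_MAP_UPDATE_ELEM: copy key and value from user space and route the
 * update to the map-type specific update helper.
 */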
static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

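/* BPF_MAP_DELETE_ELEM: copy the key from user space and remove the matching
 * element through the map's ->map_delete_elem() callback.
 */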
static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

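/* BPF_MAP_GET_NEXT_KEY: copy the current key from user space and return the
 * key that follows it in the map's iteration order.
 */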
static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

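/* Table of verifier ops indexed by program type, generated from the
 * BPF_PROG_TYPE() entries in <linux/bpf_types.h>.
 */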
static const struct bpf_verifier_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	prog->aux->ops = bpf_prog_types[type];
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

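/* Charge @pages of program memory against @user's RLIMIT_MEMLOCK; nothing is
 * charged when @user is NULL.
 */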
int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

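/* RCU callback performing the final teardown of a program: drop the
 * references on the maps it used, uncharge its memory and free the program.
 */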
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

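/* Convert an already resolved struct fd into a bpf_prog, verifying that the
 * file really is a bpf program fd; on error the fd reference is dropped.
 */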
static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

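/* BPF_PROG_LOAD: copy the license and instructions from user space, run the
 * verifier, select the runtime (JIT or interpreter) and return a new prog fd.
 */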
static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

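/* BPF_PROG_ATTACH: attach a program of the type matching the requested
 * attach point to the cgroup referenced by target_fd. Requires CAP_NET_ADMIN.
 */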
static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}
#endif /* CONFIG_CGROUP_BPF */

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

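/* BPF_PROG_TEST_RUN: run the program against caller-provided test input via
 * the type-specific ->test_run() handler, if one is implemented.
 */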
static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

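/* The bpf(2) system call entry point: apply the unprivileged-bpf policy,
 * validate the size of the attribute union supplied by user space, reject
 * unknown trailing bytes, copy the attributes in and dispatch on @cmd.
 */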
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}