arraymap.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}
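
/* A note on the per-cpu flavour: every element gets its own per-cpu
 * allocation, aligned to 8 bytes, presumably so that the
 * bpf_long_memcpy() copies further down can run in long-sized chunks.
 * A failure part-way through unwinds all allocations made so far
 * before returning -ENOMEM.
 */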

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	struct bpf_array *array;
	u64 array_size;
	u32 elem_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 || attr->map_flags)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) attr->max_entries * sizeof(void *);
	else
		array_size += (u64) attr->max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
	if (!array) {
		array = vzalloc(array_size);
		if (!array)
			return ERR_PTR(-ENOMEM);
	}

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
		kvfree(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}
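
/* Illustrative layout, going by the struct bpf_array definition in
 * <linux/bpf.h>: element storage sits directly behind the map header
 * in the same allocation, so a plain array is
 *
 *	[struct bpf_array][elem 0][elem 1]...[elem max_entries - 1]
 *
 * while a per-cpu array keeps only pointers inline:
 *
 *	[struct bpf_array][pptr 0][pptr 1]...[pptr max_entries - 1]
 *
 * which is why array_size above grows by either max_entries * elem_size
 * or max_entries * sizeof(void *).
 */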

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * index;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index]);
}
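
/* Sketch of a lookup from program context (illustrative only; "my_map"
 * and its definition are assumptions, not part of this file):
 *
 *	u32 key = 0;
 *	long *val = bpf_map_lookup_elem(&my_map, &key);
 *
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 *
 * For BPF_MAP_TYPE_ARRAY the helper lands in array_map_lookup_elem()
 * above; for the per-cpu flavour it lands in
 * percpu_array_map_lookup_elem() and returns this CPU's copy.
 */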

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
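
/* From the syscall side, a per-cpu lookup thus returns one flat buffer
 * of num_possible_cpus() slots, each round_up(value_size, 8) bytes, in
 * possible-CPU order; user space must size its value buffer to match.
 */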

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
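
/* Iteration contract: any out-of-range key (including the common trick
 * of passing an invalid key to begin a walk) yields index 0, and the
 * last valid index returns -ENOENT to terminate the walk.
 */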

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index]),
		       value, map->value_size);
	else
		memcpy(array->value + array->elem_size * index,
		       value, map->value_size);
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space will provide round_up(value_size, 8) bytes that will
	 * be copied into the per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned, or zeros, which were zero-filled by percpu_alloc, so
	 * no kernel data leak is possible.
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
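
/* Note the asymmetry with array_map_update_elem() above: an update from
 * program context writes only this CPU's copy, whereas this
 * syscall-side update writes every possible CPU's copy from one flat
 * buffer.
 */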

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * have been disconnected from events. Wait for outstanding programs
	 * to complete and free the array.
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	kvfree(array);
}

static const struct bpf_map_ops array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list array_type __read_mostly = {
	.ops = &array_ops,
	.type = BPF_MAP_TYPE_ARRAY,
};

static const struct bpf_map_ops percpu_array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list percpu_array_type __read_mostly = {
	.ops = &percpu_array_ops,
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
};

static int __init register_array_map(void)
{
	bpf_register_map_type(&array_type);
	bpf_register_map_type(&percpu_array_type);
	return 0;
}
late_initcall(register_array_map);

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);
	kvfree(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}
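
/* The xchg() is what keeps concurrent updates safe: lookups under RCU
 * see either the old pointer or the new one, never a mix, and the
 * displaced pointer is released through the map-type-specific
 * map_fd_put_ptr() callback.
 */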

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}
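
/* A prog array backs bpf_tail_call(); the bpf_prog_array_compatible()
 * check above enforces that every program stored in the map is
 * compatible with the programs calling into it, since a tail call
 * reuses the caller's context.
 */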

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static const struct bpf_map_ops prog_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
};

static struct bpf_map_type_list prog_array_type __read_mostly = {
	.ops = &prog_array_ops,
	.type = BPF_MAP_TYPE_PROG_ARRAY,
};

static int __init register_prog_array_map(void)
{
	bpf_register_map_type(&prog_array_type);
	return 0;
}
late_initcall(register_prog_array_map);

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	const struct perf_event_attr *attr;
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	event = perf_file->private_data;
	ee = ERR_PTR(-EINVAL);

	attr = perf_event_attrs(event);
	if (IS_ERR(attr) || attr->inherit)
		goto err_out;

	switch (attr->type) {
	case PERF_TYPE_SOFTWARE:
		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
			goto err_out;
		/* fall-through */
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
		ee = bpf_event_entry_gen(perf_file, map_file);
		if (ee)
			return ee;
		ee = ERR_PTR(-ENOMEM);
		/* fall-through */
	default:
		break;
	}

err_out:
	fput(perf_file);
	return ee;
}
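
/* Only hardware counters, raw counters and the software BPF_OUTPUT
 * event make it past the switch above; inherited events are rejected
 * outright, presumably because their per-task accounting does not fit
 * a map slot shared across contexts.
 */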

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}
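
/* Runs when a map file descriptor is released: every entry installed
 * through this particular map_file (recorded in ee->map_file by
 * bpf_event_entry_gen()) is dropped, so a stashed perf event cannot
 * keep the map file alive in a reference cycle.
 */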

static const struct bpf_map_ops perf_event_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

static struct bpf_map_type_list perf_event_array_type __read_mostly = {
	.ops = &perf_event_array_ops,
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};

static int __init register_perf_event_array_map(void)
{
	bpf_register_map_type(&perf_event_array_type);
	return 0;
}
late_initcall(register_perf_event_array_map);

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}
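
/* Unlike the other fd arrays, nothing else is guaranteed to empty this
 * map type before teardown, so the free path drops any remaining cgroup
 * references via bpf_fd_array_map_clear() before releasing the array
 * (fd_array_map_free() would otherwise BUG_ON() a non-empty map).
 */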

static const struct bpf_map_ops cgroup_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};

static struct bpf_map_type_list cgroup_array_type __read_mostly = {
	.ops = &cgroup_array_ops,
	.type = BPF_MAP_TYPE_CGROUP_ARRAY,
};

static int __init register_cgroup_array_map(void)
{
	bpf_register_map_type(&cgroup_array_type);
	return 0;
}
late_initcall(register_cgroup_array_map);
#endif