arraymap.c
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}
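
/*
 * Memory layout, for illustration: the element area lives inline right
 * behind struct bpf_array.  For a non-percpu map with value_size = 12
 * and max_entries = 4 that means:
 *
 *	elem_size  = round_up(12, 8) = 16
 *	array_size = sizeof(*array) + 4 * 16
 *
 * For a percpu map the inline area instead holds max_entries percpu
 * pointers, and the values sit in separate per-CPU allocations made by
 * bpf_array_alloc_percpu() above.
 */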
/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	struct bpf_array *array;
	u64 array_size;
	u32 elem_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 || attr->map_flags)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) attr->max_entries * sizeof(void *);
	else
		array_size += (u64) attr->max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size);
	if (!array)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * index;
}
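
/*
 * For reference, the sequence emitted below is roughly (an illustrative
 * listing; the exact encoding comes from the BPF_* macros):
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)		   load the index
 *	if r0 >= max_entries goto +3	   out of bounds, return NULL
 *	r0 <<= ilog2(elem_size)		   or: r0 *= elem_size
 *	r0 += r1			   &array->value[index * elem_size]
 *	goto +1
 *	r0 = 0				   NULL
 */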
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index]);
}
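
/*
 * Syscall-side lookup of a percpu element gathers every CPU's copy into
 * one flat buffer.  Illustrative layout of the user-supplied value
 * buffer: one round_up(value_size, 8) sized slot per possible CPU, so
 * with value_size = 12 and 4 possible CPUs the buffer holds 4 * 16
 * bytes, slot i carrying CPU i's value.
 */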
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();

	return 0;
}
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
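
/*
 * From user space this gives the usual key-walking idiom, sketched here
 * with libbpf's bpf_map_get_next_key() wrapper around the
 * BPF_MAP_GET_NEXT_KEY command (any out-of-range start key, such as -1,
 * restarts the walk at index 0; reaching the last index returns -ENOENT
 * and ends the loop):
 *
 *	__u32 key = -1, next_key;
 *
 *	while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
 *		... look up next_key ...
 *		key = next_key;
 *	}
 */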
/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index]),
		       value, map->value_size);
	else
		memcpy(array->value + array->elem_size * index,
		       value, map->value_size);
	return 0;
}
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();

	return 0;
}
/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

static const struct bpf_map_ops array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};

static struct bpf_map_type_list array_type __ro_after_init = {
	.ops = &array_ops,
	.type = BPF_MAP_TYPE_ARRAY,
};

static const struct bpf_map_ops percpu_array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list percpu_array_type __ro_after_init = {
	.ops = &percpu_array_ops,
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
};

static int __init register_array_map(void)
{
	bpf_register_map_type(&array_type);
	bpf_register_map_type(&percpu_array_type);
	return 0;
}
late_initcall(register_array_map);
static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}
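
/*
 * Element replacement below relies on xchg(): the pointer to the new
 * kernel object is published atomically and the old one is released
 * only afterwards, so a concurrent eBPF-side reader always sees either
 * the old or the new object, never a torn pointer.
 */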
/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}
static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}
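
/*
 * Programs stored in a prog array are the targets of bpf_tail_call();
 * bpf_prog_array_compatible() rejects a program whose type or JITed
 * state differs from what this map was first used with, since mixing
 * them in one tail-call table would be unsafe.
 */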
static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static const struct bpf_map_ops prog_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
};

static struct bpf_map_type_list prog_array_type __ro_after_init = {
	.ops = &prog_array_ops,
	.type = BPF_MAP_TYPE_PROG_ARRAY,
};

static int __init register_prog_array_map(void)
{
	bpf_register_map_type(&prog_array_type);
	return 0;
}
late_initcall(register_prog_array_map);
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}
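
/*
 * Only perf events that a BPF program can meaningfully interact with
 * are admitted: hardware and raw counters (readable through
 * bpf_perf_event_read()) and software BPF_OUTPUT events (the targets
 * of bpf_perf_event_output()).  Events with the inherit bit set are
 * rejected, as their per-task child counts cannot be read coherently
 * from here.
 */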
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	const struct perf_event_attr *attr;
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	event = perf_file->private_data;
	ee = ERR_PTR(-EINVAL);

	attr = perf_event_attrs(event);
	if (IS_ERR(attr) || attr->inherit)
		goto err_out;

	switch (attr->type) {
	case PERF_TYPE_SOFTWARE:
		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
			goto err_out;
		/* fall-through */
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
		ee = bpf_event_entry_gen(perf_file, map_file);
		if (ee)
			return ee;
		ee = ERR_PTR(-ENOMEM);
		/* fall-through */
	default:
		break;
	}

err_out:
	fput(perf_file);
	return ee;
}
static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}
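
/*
 * Called on each release of the map's user-space file: purge the
 * entries that were installed through this particular file, so the
 * perf event file references taken above do not outlive the user that
 * installed them.
 */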
static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static const struct bpf_map_ops perf_event_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

static struct bpf_map_type_list perf_event_array_type __ro_after_init = {
	.ops = &perf_event_array_ops,
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};

static int __init register_perf_event_array_map(void)
{
	bpf_register_map_type(&perf_event_array_type);
	return 0;
}
late_initcall(register_perf_event_array_map);
#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}
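
/*
 * The map may still hold cgroup references when it goes away, so drop
 * them all here before fd_array_map_free() asserts that the array is
 * empty.
 */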
static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static const struct bpf_map_ops cgroup_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};

static struct bpf_map_type_list cgroup_array_type __ro_after_init = {
	.ops = &cgroup_array_ops,
	.type = BPF_MAP_TYPE_CGROUP_ARRAY,
};

static int __init register_cgroup_array_map(void)
{
	bpf_register_map_type(&cgroup_array_type);
	return 0;
}
late_initcall(register_cgroup_array_map);
#endif