arraymap.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"
static void bpf_array_free_percpu(struct bpf_array *array)
{
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
        void __percpu *ptr;
        int i;

        for (i = 0; i < array->map.max_entries; i++) {
                ptr = __alloc_percpu_gfp(array->elem_size, 8,
                                         GFP_USER | __GFP_NOWARN);
                if (!ptr) {
                        bpf_array_free_percpu(array);
                        return -ENOMEM;
                }
                array->pptrs[i] = ptr;
        }

        return 0;
}
/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        struct bpf_array *array;
        u64 array_size;
        u32 elem_size;

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size == 0 || attr->map_flags)
                return ERR_PTR(-EINVAL);

        if (attr->value_size > KMALLOC_MAX_SIZE)
                /* if value_size is bigger, the user space won't be able to
                 * access the elements.
                 */
                return ERR_PTR(-E2BIG);

        elem_size = round_up(attr->value_size, 8);

        array_size = sizeof(*array);
        if (percpu)
                array_size += (u64) attr->max_entries * sizeof(void *);
        else
                array_size += (u64) attr->max_entries * elem_size;

        /* make sure there is no u32 overflow later in round_up() */
        if (array_size >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-ENOMEM);

        /* allocate all map elements and zero-initialize them */
        array = bpf_map_area_alloc(array_size);
        if (!array)
                return ERR_PTR(-ENOMEM);

        /* copy mandatory map attributes */
        array->map.map_type = attr->map_type;
        array->map.key_size = attr->key_size;
        array->map.value_size = attr->value_size;
        array->map.max_entries = attr->max_entries;
        array->map.map_flags = attr->map_flags;
        array->elem_size = elem_size;

        if (!percpu)
                goto out;

        array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

        if (array_size >= U32_MAX - PAGE_SIZE ||
            elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
                bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }
out:
        array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

        return &array->map;
}
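
/* The size computation above implies the following layout, matching the
 * flexible-array union of struct bpf_array in include/linux/bpf.h (sketch):
 *
 *      struct bpf_array {
 *              struct bpf_map map;
 *              u32 elem_size;
 *              ...
 *              union {
 *                      char value[0];           // plain arrays: max_entries
 *                                               //   elements of elem_size bytes
 *                      void *ptrs[0];           // fd arrays: one pointer per slot
 *                      void __percpu *pptrs[0]; // per-cpu arrays: one per-cpu
 *                                               //   allocation per slot
 *              };
 *      };
 *
 * For plain arrays the data therefore sits inline right after the header,
 * which is what array_map_lookup_elem() below indexes into.
 */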
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return array->value + array->elem_size * index;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
        struct bpf_insn *insn = insn_buf;
        u32 elem_size = round_up(map->value_size, 8);
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;

        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
        if (is_power_of_2(elem_size)) {
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        } else {
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        }
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);
        return insn - insn_buf;
}
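
/* For reference, the inline sequence emitted above behaves like the
 * following pseudo-assembly (sketch; on entry r1 = map pointer and
 * r2 = pointer to the u32 key, as for a bpf_map_lookup_elem() call):
 *
 *      r1 += offsetof(struct bpf_array, value)
 *      r0 = *(u32 *)(r2 + 0)           // load index
 *      if r0 >= max_entries goto miss  // skips the next 3 insns
 *      r0 <<= ilog2(elem_size)         // or: r0 *= elem_size
 *      r0 += r1                        // &array->value[index * elem_size]
 *      goto out
 * miss:
 *      r0 = 0                          // NULL
 * out:
 */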
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return this_cpu_ptr(array->pptrs[index]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(index >= array->map.max_entries))
                return -ENOENT;

        /* per_cpu areas are zero-filled and bpf programs can only
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}
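
/* A syscall-side lookup on a per-cpu array thus fills one rounded value per
 * possible cpu, so user space must pass a buffer of
 * num_possible_cpus() * round_up(value_size, 8) bytes. Illustrative caller
 * (sketch, using a libbpf-style bpf_map_lookup_elem() wrapper and assuming
 * value_size == sizeof(u64)):
 *
 *      u32 key = 0;
 *      u64 vals[nr_possible_cpus];
 *      if (!bpf_map_lookup_elem(map_fd, &key, vals))
 *              // vals[cpu] now holds that cpu's copy of element 0
 */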
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = (u32 *)next_key;

        if (index >= array->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == array->map.max_entries - 1)
                return -ENOENT;

        *next = index + 1;
        return 0;
}
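
/* Hence iteration starts from a NULL (or out-of-range) key and walks
 * 0 .. max_entries - 1 until -ENOENT. Typical syscall-side loop (sketch,
 * using a libbpf-style bpf_map_get_next_key() wrapper):
 *
 *      u32 key, next;
 *      int err = bpf_map_get_next_key(map_fd, NULL, &next);
 *      while (!err) {
 *              key = next;
 *              // process key
 *              err = bpf_map_get_next_key(map_fd, &key, &next);
 *      }
 */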
/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                memcpy(this_cpu_ptr(array->pptrs[index]),
                       value, map->value_size);
        else
                memcpy(array->value + array->elem_size * index,
                       value, map->value_size);
        return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
                            u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        /* the user space will provide round_up(value_size, 8) bytes that
         * will be copied into per-cpu area. bpf programs can only access
         * value_size of it. During lookup the same extra bytes will be
         * returned or zeros which were zero-filled by percpu_alloc,
         * so no kernel data leaks possible
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}
/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
        return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);

        /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the programs (can be more than one that used this map) were
         * disconnected from events. Wait for outstanding programs to complete
         * and free the array
         */
        synchronize_rcu();

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                bpf_array_free_percpu(array);

        bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
        .map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = percpu_array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
};
static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
        /* only file descriptors can be stored in this type of map */
        if (attr->value_size != sizeof(u32))
                return ERR_PTR(-EINVAL);
        return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        synchronize_rcu();

        /* make sure it's empty */
        for (i = 0; i < array->map.max_entries; i++)
                BUG_ON(array->ptrs[i] != NULL);

        bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        return NULL;
}
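
/* Lookups from BPF programs deliberately return NULL here: the slots hold
 * raw kernel pointers (progs, perf events, cgroups) that must not be handed
 * to programs, and are instead consumed by dedicated helpers such as
 * bpf_tail_call().
 */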
/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
        void **elem, *ptr;
        int ret = 0;

        if (!map->ops->map_fd_sys_lookup_elem)
                return -ENOTSUPP;

        rcu_read_lock();
        elem = array_map_lookup_elem(map, key);
        if (elem && (ptr = READ_ONCE(*elem)))
                *value = map->ops->map_fd_sys_lookup_elem(ptr);
        else
                ret = -ENOENT;
        rcu_read_unlock();

        return ret;
}
/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *new_ptr, *old_ptr;
        u32 index = *(u32 *)key, ufd;

        if (map_flags != BPF_ANY)
                return -EINVAL;

        if (index >= array->map.max_entries)
                return -E2BIG;

        ufd = *(u32 *)value;
        new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
        if (IS_ERR(new_ptr))
                return PTR_ERR(new_ptr);

        old_ptr = xchg(array->ptrs + index, new_ptr);
        if (old_ptr)
                map->ops->map_fd_put_ptr(old_ptr);

        return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *old_ptr;
        u32 index = *(u32 *)key;

        if (index >= array->map.max_entries)
                return -E2BIG;

        old_ptr = xchg(array->ptrs + index, NULL);
        if (old_ptr) {
                map->ops->map_fd_put_ptr(old_ptr);
                return 0;
        } else {
                return -ENOENT;
        }
}
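
/* Both update and delete publish the slot with xchg(), so a concurrent
 * reader observes either the old or the new pointer, never a torn value.
 * The displaced pointer is dropped through the map-type-specific
 * map_fd_put_ptr(), whose implementations defer the final free by an RCU
 * grace period (see e.g. bpf_event_entry_free_rcu() below).
 */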
static void *prog_fd_array_get_ptr(struct bpf_map *map,
                                   struct file *map_file, int fd)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_prog *prog = bpf_prog_get(fd);

        if (IS_ERR(prog))
                return prog;

        if (!bpf_prog_array_compatible(array, prog)) {
                bpf_prog_put(prog);
                return ERR_PTR(-EINVAL);
        }

        return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
        bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
        return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = prog_fd_array_get_ptr,
        .map_fd_put_ptr = prog_fd_array_put_ptr,
        .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};
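
/* Prog arrays are the map type behind bpf_tail_call(). Illustrative
 * program-side usage (sketch, samples/bpf-style map definition):
 *
 *      struct bpf_map_def SEC("maps") jmp_table = {
 *              .type = BPF_MAP_TYPE_PROG_ARRAY,
 *              .key_size = sizeof(u32),
 *              .value_size = sizeof(u32),      // prog fd, set from user space
 *              .max_entries = 8,
 *      };
 *
 *      bpf_tail_call(ctx, &jmp_table, idx);    // does not return on success
 *
 * bpf_prog_array_compatible() above ensures every program stored in the map
 * shares the owner's prog_type and JIT state.
 */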
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
                                                   struct file *map_file)
{
        struct bpf_event_entry *ee;

        ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
        if (ee) {
                ee->event = perf_file->private_data;
                ee->perf_file = perf_file;
                ee->map_file = map_file;
        }

        return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
        struct bpf_event_entry *ee;

        ee = container_of(rcu, struct bpf_event_entry, rcu);
        fput(ee->perf_file);
        kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
        call_rcu(&ee->rcu, __bpf_event_entry_free);
}
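
/* The entry pins the perf event via its struct file; the final fput() is
 * deferred by an RCU grace period so a program still dereferencing the
 * entry under rcu_read_lock() cannot race with the event being freed.
 */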
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
                                         struct file *map_file, int fd)
{
        struct bpf_event_entry *ee;
        struct perf_event *event;
        struct file *perf_file;
        u64 value;

        perf_file = perf_event_get(fd);
        if (IS_ERR(perf_file))
                return perf_file;

        ee = ERR_PTR(-EOPNOTSUPP);
        event = perf_file->private_data;
        if (perf_event_read_local(event, &value) == -EOPNOTSUPP)
                goto err_out;

        ee = bpf_event_entry_gen(perf_file, map_file);
        if (ee)
                return ee;
        ee = ERR_PTR(-ENOMEM);
err_out:
        fput(perf_file);
        return ee;
}
static void perf_event_fd_array_put_ptr(void *ptr)
{
        bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
                                        struct file *map_file)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_event_entry *ee;
        int i;

        rcu_read_lock();
        for (i = 0; i < array->map.max_entries; i++) {
                ee = READ_ONCE(array->ptrs[i]);
                if (ee && ee->map_file == map_file)
                        fd_array_map_delete_elem(map, &i);
        }
        rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = perf_event_fd_array_get_ptr,
        .map_fd_put_ptr = perf_event_fd_array_put_ptr,
        .map_release = perf_event_fd_array_release,
};
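
/* Perf event arrays are the transport behind bpf_perf_event_output().
 * Illustrative program-side usage (sketch, where 'events' is a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY whose slots user space populated with perf
 * event fds, typically one per cpu):
 *
 *      bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *                            &data, sizeof(data));
 */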
#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
                                     struct file *map_file /* not used */,
                                     int fd)
{
        return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
        /* cgroup_put() frees cgrp after an RCU grace period */
        cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = cgroup_fd_array_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = cgroup_fd_array_get_ptr,
        .map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
        struct bpf_map *map, *inner_map_meta;

        inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
        if (IS_ERR(inner_map_meta))
                return inner_map_meta;

        map = fd_array_map_alloc(attr);
        if (IS_ERR(map)) {
                bpf_map_meta_free(inner_map_meta);
                return map;
        }

        map->inner_map_meta = inner_map_meta;

        return map;
}

static void array_of_map_free(struct bpf_map *map)
{
        /* map->inner_map_meta is only accessed by syscall which
         * is protected by fdget/fdput.
         */
        bpf_map_meta_free(map->inner_map_meta);
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_map **inner_map = array_map_lookup_elem(map, key);

        if (!inner_map)
                return NULL;

        return READ_ONCE(*inner_map);
}
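
/* A program-side map-in-map lookup is therefore two chained lookups: the
 * outer one yields the inner map pointer stored in the slot, and a second
 * bpf_map_lookup_elem() on that pointer reaches the data. Sketch:
 *
 *      void *inner = bpf_map_lookup_elem(&outer_array, &idx);
 *      if (inner) {
 *              val = bpf_map_lookup_elem(inner, &key);
 *              ...
 *      }
 */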
const struct bpf_map_ops array_of_maps_map_ops = {
        .map_alloc = array_of_map_alloc,
        .map_free = array_of_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_of_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = bpf_map_fd_get_ptr,
        .map_fd_put_ptr = bpf_map_fd_put_ptr,
        .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
};