arraymap.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static void bpf_array_free_percpu(struct bpf_array *array)
{
        int i;

        for (i = 0; i < array->map.max_entries; i++) {
                free_percpu(array->pptrs[i]);
                cond_resched();
        }
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
        void __percpu *ptr;
        int i;

        for (i = 0; i < array->map.max_entries; i++) {
                ptr = __alloc_percpu_gfp(array->elem_size, 8,
                                         GFP_USER | __GFP_NOWARN);
                if (!ptr) {
                        bpf_array_free_percpu(array);
                        return -ENOMEM;
                }
                array->pptrs[i] = ptr;
                cond_resched();
        }

        return 0;
}
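
/* Note: each element gets its own per-cpu allocation (aligned to 8 bytes,
 * per the second argument of __alloc_percpu_gfp() above), and the
 * cond_resched() calls keep large allocation/free loops from monopolizing
 * the CPU when max_entries is big.
 */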

/* Called from syscall */
static int array_map_alloc_check(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int numa_node = bpf_map_attr_numa_node(attr);

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size == 0 ||
            attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
            (percpu && numa_node != NUMA_NO_NODE))
                return -EINVAL;

        if (attr->value_size > KMALLOC_MAX_SIZE)
                /* if value_size is bigger, the user space won't be able to
                 * access the elements.
                 */
                return -E2BIG;

        return 0;
}
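
/* Example (a sketch, not from this file): the checks above admit an
 * attribute set such as
 *
 *      map_type    = BPF_MAP_TYPE_ARRAY
 *      key_size    = 4        (keys are always a u32 index)
 *      value_size  = 64
 *      max_entries = 256
 *
 * and reject key_size != 4, zero-sized values, unknown flags, and a NUMA
 * node request on per-cpu arrays.
 */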

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int ret, numa_node = bpf_map_attr_numa_node(attr);
        u32 elem_size, index_mask, max_entries;
        bool unpriv = !capable(CAP_SYS_ADMIN);
        u64 cost, array_size, mask64;
        struct bpf_array *array;

        elem_size = round_up(attr->value_size, 8);

        max_entries = attr->max_entries;

        /* On 32 bit archs roundup_pow_of_two() with max_entries that has
         * upper most bit set in u32 space is undefined behavior due to
         * resulting 1U << 32, so do it manually here in u64 space.
         */
        mask64 = fls_long(max_entries - 1);
        mask64 = 1ULL << mask64;
        mask64 -= 1;

        index_mask = mask64;
        if (unpriv) {
                /* round up array size to nearest power of 2,
                 * since cpu will speculate within index_mask limits
                 */
                max_entries = index_mask + 1;
                /* Check for overflows. */
                if (max_entries < attr->max_entries)
                        return ERR_PTR(-E2BIG);
        }
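
        /* Worked example: max_entries = 5 gives fls_long(4) = 3, so
         * mask64 = (1ULL << 3) - 1 = 7 and index_mask = 7.  For an
         * unprivileged map the array is then sized to index_mask + 1 = 8
         * slots, so a speculated access of (index & 7) can never read
         * beyond the allocation.
         */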

        array_size = sizeof(*array);
        if (percpu)
                array_size += (u64) max_entries * sizeof(void *);
        else
                array_size += (u64) max_entries * elem_size;

        /* make sure there is no u32 overflow later in round_up() */
        cost = array_size;
        if (cost >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-ENOMEM);
        if (percpu) {
                cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
                if (cost >= U32_MAX - PAGE_SIZE)
                        return ERR_PTR(-ENOMEM);
        }
        cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

        ret = bpf_map_precharge_memlock(cost);
        if (ret < 0)
                return ERR_PTR(ret);

        /* allocate all map elements and zero-initialize them */
        array = bpf_map_area_alloc(array_size, numa_node);
        if (!array)
                return ERR_PTR(-ENOMEM);
        array->index_mask = index_mask;
        array->map.unpriv_array = unpriv;

        /* copy mandatory map attributes */
        bpf_map_init_from_attr(&array->map, attr);
        array->map.pages = cost;
        array->elem_size = elem_size;

        if (percpu && bpf_array_alloc_percpu(array)) {
                bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }

        return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return array->value + array->elem_size * (index & array->index_mask);
}
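
/* Note: the "& array->index_mask" is not redundant with the bounds check
 * above; it clamps the index even when the CPU speculates past the branch,
 * which is the Spectre v1 mitigation this map relies on.
 */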

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_insn *insn = insn_buf;
        u32 elem_size = round_up(map->value_size, 8);
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;

        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        if (map->unpriv_array) {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
                *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
        } else {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
        }

        if (is_power_of_2(elem_size)) {
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        } else {
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        }
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);
        return insn - insn_buf;
}
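
/* The emitted sequence corresponds roughly to this C (a sketch):
 *
 *      index = *(u32 *)key;
 *      if (index >= max_entries)
 *              return NULL;
 *      index &= index_mask;                     (unprivileged maps only)
 *      return array->value + elem_size * index;
 *
 * The JGE offsets (4 vs 3) skip forward to the trailing MOV that loads
 * NULL into R0, depending on whether the AND instruction was emitted.
 */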

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(index >= array->map.max_entries))
                return -ENOENT;

        /* per_cpu areas are zero-filled and bpf programs can only
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}
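
/* Note: the destination buffer is laid out as one rounded-up value per
 * possible CPU, so user space must supply
 * num_possible_cpus() * round_up(value_size, 8) bytes.  Example: with
 * value_size = 12 and 4 possible CPUs, that is 4 * 16 = 64 bytes.
 */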

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = (u32 *)next_key;

        if (index >= array->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == array->map.max_entries - 1)
                return -ENOENT;

        *next = index + 1;
        return 0;
}
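
/* Iteration example: with max_entries = 3, a NULL (or out-of-range) key
 * yields 0, then 0 -> 1 and 1 -> 2, and key 2 returns -ENOENT to end the
 * walk.
 */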

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
                       value, map->value_size);
        else
                memcpy(array->value +
                       array->elem_size * (index & array->index_mask),
                       value, map->value_size);
        return 0;
}
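
/* Flag semantics follow from the pre-allocated layout: every in-range slot
 * always "exists", so BPF_NOEXIST can never succeed, while BPF_ANY and
 * BPF_EXIST behave identically.
 */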

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
                            u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        /* user space will provide round_up(value_size, 8) bytes that are
         * copied into the per-cpu area; bpf programs can only access
         * value_size of it. During lookup the same extra bytes are
         * returned, or the zeros that percpu_alloc filled in, so no
         * kernel data can leak.
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
        return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);

        /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so all programs that used this map (there can be more than one)
         * have been disconnected from events. Wait for outstanding programs
         * to complete and free the array.
         */
        synchronize_rcu();

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                bpf_array_free_percpu(array);

        bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
        .map_alloc_check = array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
        .map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
        .map_alloc_check = array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = percpu_array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
        /* only file descriptors can be stored in this type of map */
        if (attr->value_size != sizeof(u32))
                return -EINVAL;
        return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        synchronize_rcu();

        /* make sure it's empty */
        for (i = 0; i < array->map.max_entries; i++)
                BUG_ON(array->ptrs[i] != NULL);

        bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
        void **elem, *ptr;
        int ret = 0;

        if (!map->ops->map_fd_sys_lookup_elem)
                return -ENOTSUPP;

        rcu_read_lock();
        elem = array_map_lookup_elem(map, key);
        if (elem && (ptr = READ_ONCE(*elem)))
                *value = map->ops->map_fd_sys_lookup_elem(ptr);
        else
                ret = -ENOENT;
        rcu_read_unlock();

        return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *new_ptr, *old_ptr;
        u32 index = *(u32 *)key, ufd;

        if (map_flags != BPF_ANY)
                return -EINVAL;

        if (index >= array->map.max_entries)
                return -E2BIG;

        ufd = *(u32 *)value;
        new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
        if (IS_ERR(new_ptr))
                return PTR_ERR(new_ptr);

        old_ptr = xchg(array->ptrs + index, new_ptr);
        if (old_ptr)
                map->ops->map_fd_put_ptr(old_ptr);

        return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *old_ptr;
        u32 index = *(u32 *)key;

        if (index >= array->map.max_entries)
                return -E2BIG;

        old_ptr = xchg(array->ptrs + index, NULL);
        if (old_ptr) {
                map->ops->map_fd_put_ptr(old_ptr);
                return 0;
        } else {
                return -ENOENT;
        }
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
                                   struct file *map_file, int fd)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_prog *prog = bpf_prog_get(fd);

        if (IS_ERR(prog))
                return prog;

        if (!bpf_prog_array_compatible(array, prog)) {
                bpf_prog_put(prog);
                return ERR_PTR(-EINVAL);
        }

        return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
        bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
        return ((struct bpf_prog *)ptr)->aux->id;
}
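
/* Note: a syscall-side lookup on an fd map cannot return the stored kernel
 * pointer, so it reports the program's ID instead; user space can turn that
 * back into an fd with BPF_PROG_GET_FD_BY_ID.
 */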

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = prog_fd_array_get_ptr,
        .map_fd_put_ptr = prog_fd_array_put_ptr,
        .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
        .map_release_uref = bpf_fd_array_map_clear,
};
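
/* Usage sketch (BPF program side, not part of this file): prog arrays are
 * the map type behind tail calls.  A program would do
 *
 *      bpf_tail_call(ctx, &jmp_table, slot);
 *
 * where jmp_table is a BPF_MAP_TYPE_PROG_ARRAY and slot is a u32 index;
 * if the slot is empty, execution falls through to the next instruction.
 */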

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
                                                   struct file *map_file)
{
        struct bpf_event_entry *ee;

        ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
        if (ee) {
                ee->event = perf_file->private_data;
                ee->perf_file = perf_file;
                ee->map_file = map_file;
        }

        return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
        struct bpf_event_entry *ee;

        ee = container_of(rcu, struct bpf_event_entry, rcu);
        fput(ee->perf_file);
        kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
        call_rcu(&ee->rcu, __bpf_event_entry_free);
}
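
/* Note: freeing is deferred through call_rcu() so that a BPF program which
 * looked the entry up under rcu_read_lock() can keep using it; the perf
 * file reference is only dropped after the grace period.
 */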

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
                                         struct file *map_file, int fd)
{
        struct bpf_event_entry *ee;
        struct perf_event *event;
        struct file *perf_file;
        u64 value;

        perf_file = perf_event_get(fd);
        if (IS_ERR(perf_file))
                return perf_file;

        ee = ERR_PTR(-EOPNOTSUPP);
        event = perf_file->private_data;
        if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
                goto err_out;

        ee = bpf_event_entry_gen(perf_file, map_file);
        if (ee)
                return ee;
        ee = ERR_PTR(-ENOMEM);
err_out:
        fput(perf_file);
        return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
        bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
                                        struct file *map_file)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_event_entry *ee;
        int i;

        rcu_read_lock();
        for (i = 0; i < array->map.max_entries; i++) {
                ee = READ_ONCE(array->ptrs[i]);
                if (ee && ee->map_file == map_file)
                        fd_array_map_delete_elem(map, &i);
        }
        rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = perf_event_fd_array_get_ptr,
        .map_fd_put_ptr = perf_event_fd_array_put_ptr,
        .map_release = perf_event_fd_array_release,
};
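
/* Usage sketch (BPF program side, not part of this file): perf event
 * arrays are what bpf_perf_event_output() writes through, e.g.
 *
 *      bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *                            &data, sizeof(data));
 *
 * where events is a BPF_MAP_TYPE_PERF_EVENT_ARRAY whose slots hold perf
 * event fds installed from user space.
 */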

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
                                     struct file *map_file /* not used */,
                                     int fd)
{
        return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
        /* cgroup_put() frees the cgroup after an RCU grace period */
        cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = cgroup_fd_array_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = cgroup_fd_array_get_ptr,
        .map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
        struct bpf_map *map, *inner_map_meta;

        inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
        if (IS_ERR(inner_map_meta))
                return inner_map_meta;

        map = array_map_alloc(attr);
        if (IS_ERR(map)) {
                bpf_map_meta_free(inner_map_meta);
                return map;
        }

        map->inner_map_meta = inner_map_meta;

        return map;
}
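
/* Note: inner_map_meta is a template carrying the inner map's type and
 * key/value sizes; the verifier checks every map inserted later against
 * it, so all inner maps of one map-in-map must look alike.
 */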

static void array_of_map_free(struct bpf_map *map)
{
        /* map->inner_map_meta is only accessed from the syscall path,
         * which is protected by fdget/fdput.
         */
        bpf_map_meta_free(map->inner_map_meta);
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_map **inner_map = array_map_lookup_elem(map, key);

        if (!inner_map)
                return NULL;

        return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
                                   struct bpf_insn *insn_buf)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 elem_size = round_up(map->value_size, 8);
        struct bpf_insn *insn = insn_buf;
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;

        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        if (map->unpriv_array) {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
                *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
        } else {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
        }
        if (is_power_of_2(elem_size))
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        else
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
        *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);
        return insn - insn_buf;
}
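
/* Same shape as array_map_gen_lookup(), plus one extra BPF_DW load: the
 * slot holds a pointer to the inner map, so the generated code dereferences
 * it and returns NULL when the slot is empty.  The larger JGE offsets
 * (6 and 5) account for the additional LDX and JEQ instructions.
 */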

const struct bpf_map_ops array_of_maps_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_of_map_alloc,
        .map_free = array_of_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_of_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = bpf_map_fd_get_ptr,
        .map_fd_put_ptr = bpf_map_fd_put_ptr,
        .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
        .map_gen_lookup = array_of_map_gen_lookup,
};