/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
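
/* free the per-cpu value storage of every slot of a BPF_MAP_TYPE_PERCPU_ARRAY */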
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}
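
/* allocate an 8-byte aligned per-cpu region of elem_size bytes for every
 * slot; on failure, whatever was already allocated is freed again
 */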
static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	struct bpf_array *array;
	u64 array_size, mask64;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->map.map_flags = attr->map_flags;
	array->map.numa_node = numa_node;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}
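
/* A minimal user-space sketch (not part of this file) of the map creation
 * request that the sanity checks above validate; the field values below
 * are illustrative only:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,		// keys must be exactly 4 bytes
 *		.value_size  = 64,
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */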

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;
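	/* the AND with index_mask keeps even speculative loads within the
	 * power-of-2 rounded array size chosen in array_map_alloc()
	 */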
	return array->value + array->elem_size * (index & array->index_mask);
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}
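
/* Called from syscall: gather the per-cpu values of one element into the
 * flat buffer @value, one round_up(value_size, 8) chunk per possible cpu
 */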
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	else
		memcpy(array->value +
		       array->elem_size * (index & array->index_mask),
		       value, map->value_size);
	return 0;
}
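
/* Called from syscall: update the value of one element for every possible
 * cpu from the flat buffer @value (round_up(value_size, 8) bytes per cpu)
 */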
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}
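
/* all fd-backed slots must already have been cleared (via map_fd_put_ptr)
 * before the array itself is released
 */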
static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}
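
/* fd-backed arrays cannot be dereferenced directly from eBPF programs;
 * accesses go through dedicated helpers, so a plain lookup yields nothing
 */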
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}
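
/* convert a bpf program fd into its bpf_prog pointer, taking a reference;
 * programs whose type is incompatible with this prog_array are rejected
 */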
static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};
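
/* perf_event arrays store a small bpf_event_entry per slot; the entry holds
 * the perf event file (and remembers the map file it was installed through)
 * for as long as it stays in the map
 */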
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}
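
/* only perf events that can be read locally (perf_event_read_local() does
 * not return -EOPNOTSUPP) are allowed into the map
 */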
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}
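
/* when a map file is released, drop all entries that were installed
 * through that particular file
 */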
static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an rcu grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif
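
/* array-of-maps: each value is a map fd; the attributes of the inner map
 * are captured in inner_map_meta when the outer map is created
 */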
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
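
/* like array_map_gen_lookup(), but with one extra load to fetch the inner
 * map pointer from the slot; an empty slot makes the lookup return NULL
 */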
static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
};