arraymap.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static void bpf_array_free_percpu(struct bpf_array *array)
{
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
        void __percpu *ptr;
        int i;

        for (i = 0; i < array->map.max_entries; i++) {
                /* one 8-byte aligned per-cpu chunk per element */
                ptr = __alloc_percpu_gfp(array->elem_size, 8,
                                         GFP_USER | __GFP_NOWARN);
                if (!ptr) {
                        bpf_array_free_percpu(array);
                        return -ENOMEM;
                }
                array->pptrs[i] = ptr;
        }

        return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int numa_node = bpf_map_attr_numa_node(attr);
        struct bpf_array *array;
        u64 array_size;
        u32 elem_size;

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size == 0 ||
            attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
            (percpu && numa_node != NUMA_NO_NODE))
                return ERR_PTR(-EINVAL);

        if (attr->value_size > KMALLOC_MAX_SIZE)
                /* if value_size is bigger, the user space won't be able to
                 * access the elements.
                 */
                return ERR_PTR(-E2BIG);

        elem_size = round_up(attr->value_size, 8);

        array_size = sizeof(*array);
        if (percpu)
                array_size += (u64) attr->max_entries * sizeof(void *);
        else
                array_size += (u64) attr->max_entries * elem_size;

        /* make sure there is no u32 overflow later in round_up() */
        if (array_size >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-ENOMEM);

        /* allocate all map elements and zero-initialize them */
        array = bpf_map_area_alloc(array_size, numa_node);
        if (!array)
                return ERR_PTR(-ENOMEM);

        /* copy mandatory map attributes */
        array->map.map_type = attr->map_type;
        array->map.key_size = attr->key_size;
        array->map.value_size = attr->value_size;
        array->map.max_entries = attr->max_entries;
        array->map.map_flags = attr->map_flags;
        array->map.numa_node = numa_node;
        array->elem_size = elem_size;

        if (!percpu)
                goto out;

        array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

        if (array_size >= U32_MAX - PAGE_SIZE ||
            elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
                bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }
out:
        array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

        return &array->map;
}
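
/* Worked example of the sizing above (illustrative, not part of the
 * original source): with max_entries = 256 and value_size = 20,
 * elem_size rounds up to 24. A plain array then occupies
 * sizeof(struct bpf_array) + 256 * 24 bytes in one bpf_map_area_alloc()
 * region. A per-cpu array instead occupies
 * sizeof(struct bpf_array) + 256 * sizeof(void *) for the pptrs[] table,
 * plus 256 * 24 bytes per possible CPU from the percpu allocator; the
 * num_possible_cpus() term is folded into array_size above only so that
 * map.pages charges the full footprint, since the per-cpu data itself
 * is allocated separately in bpf_array_alloc_percpu().
 */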

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return array->value + array->elem_size * index;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
        struct bpf_insn *insn = insn_buf;
        u32 elem_size = round_up(map->value_size, 8);
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;

        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);

        if (is_power_of_2(elem_size)) {
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        } else {
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        }
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);
        return insn - insn_buf;
}
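
/* The sequence built above is roughly the following (illustrative
 * pseudo-asm; per the BPF calling convention r1 holds the map pointer
 * and r2 the key pointer):
 *
 *   r1 += offsetof(struct bpf_array, value)
 *   r0 = *(u32 *)(r2 + 0)             // index = *key
 *   if r0 >= max_entries goto miss    // skips the next 3 insns
 *   r0 <<= ilog2(elem_size)           // or: r0 *= elem_size
 *   r0 += r1                          // &array->value[index * elem_size]
 *   goto out
 * miss:
 *   r0 = 0                            // NULL
 * out:
 */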

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return this_cpu_ptr(array->pptrs[index]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(index >= array->map.max_entries))
                return -ENOENT;

        /* per_cpu areas are zero-filled and bpf programs can only
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}
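
/* From the syscall side the value buffer must therefore hold
 * num_possible_cpus() * round_up(value_size, 8) bytes. A hedged
 * userspace sketch (assumes the libbpf bpf_map_lookup_elem() syscall
 * wrapper; 'ncpus' stands in for the number of possible CPUs;
 * illustrative only, not part of this file):
 *
 *   __u32 key = 0, i;
 *   __u64 total = 0;
 *   __u64 *vals = calloc(ncpus, sizeof(*vals));  // value_size == 8 here
 *
 *   if (vals && !bpf_map_lookup_elem(map_fd, &key, vals))
 *           for (i = 0; i < ncpus; i++)
 *                   total += vals[i];            // fold per-CPU counters
 */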

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = (u32 *)next_key;

        if (index >= array->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == array->map.max_entries - 1)
                return -ENOENT;

        *next = index + 1;
        return 0;
}
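
/* A hedged userspace iteration sketch (assumes the libbpf
 * bpf_map_get_next_key() syscall wrapper; illustrative only, not part
 * of this file). A NULL or out-of-range key restarts the walk at
 * index 0, and the call fails once the last index has been visited:
 *
 *   __u32 key, next;
 *   int err = bpf_map_get_next_key(map_fd, NULL, &next);  // next = 0
 *
 *   while (!err) {
 *           key = next;
 *           ... look up 'key' here ...
 *           err = bpf_map_get_next_key(map_fd, &key, &next);
 *   }
 */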

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                memcpy(this_cpu_ptr(array->pptrs[index]),
                       value, map->value_size);
        else
                memcpy(array->value + array->elem_size * index,
                       value, map->value_size);
        return 0;
}
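
/* A hedged BPF-program-side sketch of the usual lookup-then-modify
 * pattern this supports (elements are pre-allocated, so in-place writes
 * never allocate; 'my_array' is a hypothetical BPF_MAP_TYPE_ARRAY of
 * u64 counters; illustrative only, not part of this file):
 *
 *   __u32 key = 0;
 *   __u64 *val = bpf_map_lookup_elem(&my_array, &key);
 *
 *   if (val)
 *           __sync_fetch_and_add(val, 1);
 */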

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
                            u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        /* user space provides round_up(value_size, 8) bytes per CPU that
         * are copied into the per-cpu area. bpf programs can only access
         * value_size of it; during lookup the same extra bytes are
         * returned, or the zeros that percpu_alloc zero-filled, so no
         * kernel data can leak.
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
        return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);

        /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the programs (there can be more than one that used this map)
         * were disconnected from events. Wait for outstanding programs
         * to complete and free the array
         */
        synchronize_rcu();

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                bpf_array_free_percpu(array);

        bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
        .map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = percpu_array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
};

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
        /* only file descriptors can be stored in this type of map */
        if (attr->value_size != sizeof(u32))
                return ERR_PTR(-EINVAL);
        return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        synchronize_rcu();

        /* make sure it's empty */
        for (i = 0; i < array->map.max_entries; i++)
                BUG_ON(array->ptrs[i] != NULL);

        bpf_map_area_free(array);
}

/* fd maps hold kernel objects rather than plain values, so an eBPF
 * program cannot dereference the elements via a direct lookup
 */
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
        void **elem, *ptr;
        int ret = 0;

        if (!map->ops->map_fd_sys_lookup_elem)
                return -ENOTSUPP;

        rcu_read_lock();
        elem = array_map_lookup_elem(map, key);
        if (elem && (ptr = READ_ONCE(*elem)))
                *value = map->ops->map_fd_sys_lookup_elem(ptr);
        else
                ret = -ENOENT;
        rcu_read_unlock();

        return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *new_ptr, *old_ptr;
        u32 index = *(u32 *)key, ufd;

        if (map_flags != BPF_ANY)
                return -EINVAL;

        if (index >= array->map.max_entries)
                return -E2BIG;

        ufd = *(u32 *)value;
        new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
        if (IS_ERR(new_ptr))
                return PTR_ERR(new_ptr);

        old_ptr = xchg(array->ptrs + index, new_ptr);
        if (old_ptr)
                map->ops->map_fd_put_ptr(old_ptr);

        return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *old_ptr;
        u32 index = *(u32 *)key;

        if (index >= array->map.max_entries)
                return -E2BIG;

        old_ptr = xchg(array->ptrs + index, NULL);
        if (old_ptr) {
                map->ops->map_fd_put_ptr(old_ptr);
                return 0;
        } else {
                return -ENOENT;
        }
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
                                   struct file *map_file, int fd)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_prog *prog = bpf_prog_get(fd);

        if (IS_ERR(prog))
                return prog;

        if (!bpf_prog_array_compatible(array, prog)) {
                bpf_prog_put(prog);
                return ERR_PTR(-EINVAL);
        }

        return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
        bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
        return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = prog_fd_array_get_ptr,
        .map_fd_put_ptr = prog_fd_array_put_ptr,
        .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};
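
/* A prog array is the map type behind the bpf_tail_call() helper. A
 * hedged BPF-program-side sketch ('jmp_table' is a hypothetical
 * BPF_MAP_TYPE_PROG_ARRAY whose slots userspace fills with program fds;
 * illustrative only, not part of this file):
 *
 *   bpf_tail_call(ctx, &jmp_table, slot);
 *   // execution only continues here if 'slot' is empty, out of range,
 *   // or the tail-call limit was reached; on success the current
 *   // program is replaced and never returns to this point
 */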

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
                                                   struct file *map_file)
{
        struct bpf_event_entry *ee;

        ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
        if (ee) {
                ee->event = perf_file->private_data;
                ee->perf_file = perf_file;
                ee->map_file = map_file;
        }

        return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
        struct bpf_event_entry *ee;

        ee = container_of(rcu, struct bpf_event_entry, rcu);
        fput(ee->perf_file);
        kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
        call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
                                         struct file *map_file, int fd)
{
        struct bpf_event_entry *ee;
        struct perf_event *event;
        struct file *perf_file;
        u64 value;

        perf_file = perf_event_get(fd);
        if (IS_ERR(perf_file))
                return perf_file;

        ee = ERR_PTR(-EOPNOTSUPP);
        event = perf_file->private_data;
        if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
                goto err_out;

        ee = bpf_event_entry_gen(perf_file, map_file);
        if (ee)
                return ee;
        ee = ERR_PTR(-ENOMEM);
err_out:
        fput(perf_file);
        return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
        bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
                                        struct file *map_file)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_event_entry *ee;
        int i;

        rcu_read_lock();
        for (i = 0; i < array->map.max_entries; i++) {
                ee = READ_ONCE(array->ptrs[i]);
                if (ee && ee->map_file == map_file)
                        fd_array_map_delete_elem(map, &i);
        }
        rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = perf_event_fd_array_get_ptr,
        .map_fd_put_ptr = perf_event_fd_array_put_ptr,
        .map_release = perf_event_fd_array_release,
};
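
/* This map type backs the bpf_perf_event_output() and
 * bpf_perf_event_read() helpers. A hedged BPF-program-side sketch
 * ('events' is a hypothetical BPF_MAP_TYPE_PERF_EVENT_ARRAY and
 * struct event_t a made-up payload; BPF_F_CURRENT_CPU selects the
 * current CPU's entry; illustrative only, not part of this file):
 *
 *   struct event_t e = { ... };
 *
 *   bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *                         &e, sizeof(e));
 */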

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
                                     struct file *map_file /* not used */,
                                     int fd)
{
        return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
        /* cgroup_put() frees cgrp after an RCU grace period */
        cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = cgroup_fd_array_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = cgroup_fd_array_get_ptr,
        .map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
        struct bpf_map *map, *inner_map_meta;

        inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
        if (IS_ERR(inner_map_meta))
                return inner_map_meta;

        map = fd_array_map_alloc(attr);
        if (IS_ERR(map)) {
                bpf_map_meta_free(inner_map_meta);
                return map;
        }

        map->inner_map_meta = inner_map_meta;

        return map;
}

static void array_of_map_free(struct bpf_map *map)
{
        /* map->inner_map_meta is only accessed by syscall which
         * is protected by fdget/fdput.
         */
        bpf_map_meta_free(map->inner_map_meta);
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_map **inner_map = array_map_lookup_elem(map, key);

        if (!inner_map)
                return NULL;

        return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
                                   struct bpf_insn *insn_buf)
{
        u32 elem_size = round_up(map->value_size, 8);
        struct bpf_insn *insn = insn_buf;
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;

        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
        if (is_power_of_2(elem_size))
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        else
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
        *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);

        return insn - insn_buf;
}
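
/* Relative to array_map_gen_lookup() above, this sequence adds one
 * BPF_DW load to follow the stored inner-map pointer and a JEQ test so
 * an empty slot still yields NULL (illustrative pseudo-asm; the JGE
 * bounds check now skips 5 insns to reach the miss path):
 *
 *   ... same bounds check and index scaling as above ...
 *   r0 = *(u64 *)(r0 + 0)      // fetch inner-map pointer, may be NULL
 *   if r0 == 0 goto miss       // empty slot behaves like a failed lookup
 *   goto out
 * miss:
 *   r0 = 0
 * out:
 */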

const struct bpf_map_ops array_of_maps_map_ops = {
        .map_alloc = array_of_map_alloc,
        .map_free = array_of_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_of_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = bpf_map_fd_get_ptr,
        .map_fd_put_ptr = bpf_map_fd_put_ptr,
        .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
        .map_gen_lookup = array_of_map_gen_lookup,
};