hashtab.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"

#define HTAB_CREATE_FLAG_MASK \
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE)

struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	struct htab_elem *__percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct bpf_htab *htab;
				struct pcpu_freelist_node fnode;
			};
		};
	};
	union {
		struct rcu_head rcu;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};

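/* For illustration, the in-memory layout of one element of a regular
 * (non-percpu) map, matching the elem_size computation in
 * htab_map_alloc() below:
 *
 *   struct htab_elem | key (round_up(key_size, 8)) | value (round_up(value_size, 8))
 *
 * For percpu maps the value area is instead a single pointer to a
 * per-cpu allocation, read and written at l->key + key_size by
 * htab_elem_get_ptr()/htab_elem_set_ptr() below.
 */
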
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_prealloc(const struct bpf_htab *htab)
{
	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
	}
free_elems:
	bpf_map_area_free(htab->elems);
}

static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int err = -ENOMEM, i;

	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
		num_entries += num_possible_cpus();

	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
					 htab->map.numa_node);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < num_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

static int alloc_extra_elems(struct bpf_htab *htab)
{
	struct htab_elem *__percpu *pptr, *l_new;
	struct pcpu_freelist_node *l;
	int cpu;

	pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
				  GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		l = pcpu_freelist_pop(&htab->freelist);
		/* pop will succeed, since prealloc_init()
		 * preallocated extra num_possible_cpus elements
		 */
		l_new = container_of(l, struct htab_elem, fnode);
		*per_cpu_ptr(pptr, cpu) = l_new;
	}
	htab->extra_elems = pptr;
	return 0;
}

/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
		     offsetof(struct htab_elem, hash_node.pprev));
	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (lru && !capable(CAP_SYS_ADMIN))
		/* The LRU implementation is much more complicated than
		 * other maps.  Hence, limit it to CAP_SYS_ADMIN for now.
		 */
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (!lru && percpu_lru)
		return ERR_PTR(-EINVAL);

	if (lru && !prealloc)
		return ERR_PTR(-ENOTSUPP);

	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
		return ERR_PTR(-EINVAL);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.map_type = attr->map_type;
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;
	htab->map.map_flags = attr->map_flags;
	htab->map.numa_node = numa_node;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	if (percpu_lru) {
		/* ensure each CPU's lru list has >=1 elements.
		 * since we are at it, make each lru list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket),
					   htab->map.numa_node);
	if (!htab->buckets)
		goto free_htab;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_buckets;

		if (!percpu && !lru) {
			/* lru itself can remove the least used element, so
			 * there is no need for an extra elem during map_update.
			 */
			err = alloc_extra_elems(htab);
			if (err)
				goto free_prealloc;
		}
	}

	return &htab->map;

free_prealloc:
	prealloc_destroy(htab);
free_buckets:
	bpf_map_area_free(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

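/* A rough worked example of the sizing above, assuming a regular
 * (non-percpu, non-LRU) map with key_size = 4, value_size = 8 and
 * max_entries = 1000 on a 64-bit kernel:
 *
 *   n_buckets = roundup_pow_of_two(1000) = 1024
 *   elem_size = sizeof(struct htab_elem) + round_up(4, 8) + round_up(8, 8)
 *             = sizeof(struct htab_elem) + 16
 *   cost      = 1024 * sizeof(struct bucket) + 1000 * elem_size
 *               + num_possible_cpus() * elem_size  (the extra prealloc pool)
 *
 * The exact byte counts depend on the struct sizes of the running
 * kernel; the numbers only illustrate how the terms combine.
 */
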
static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* can be called without bucket lock. it will repeat the loop in
 * the unlikely event when elements moved from one bucket into another
 * while the linked list is being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}

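/* Why the nulls check above works: each bucket's list is terminated by
 * a "nulls" marker encoding the bucket index (see
 * INIT_HLIST_NULLS_HEAD(..., i) in htab_map_alloc()).  If a lockless
 * walker gets migrated onto another chain because the element it stood
 * on was deleted and recycled, the terminating marker it reaches
 * carries the wrong bucket index and the walk is restarted, turning a
 * possible false negative into a retry.
 */
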
/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

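/* The sequence emitted above corresponds roughly to:
 *
 *   call __htab_map_lookup_elem      ; R0 = elem or NULL
 *   if R0 == 0 goto +1               ; miss: return NULL as-is
 *   R0 += offsetof(struct htab_elem, key) + round_up(key_size, 8)
 *
 * so an inlined lookup costs one direct call plus two instructions
 * instead of several nested calls.
 */
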
static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int ref_reg = BPF_REG_1;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
			      offsetof(struct htab_elem, lru_node) +
			      offsetof(struct bpf_lru_node, ref));
	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
	*insn++ = BPF_ST_MEM(BPF_B, ret,
			     offsetof(struct htab_elem, lru_node) +
			     offsetof(struct bpf_lru_node, ref),
			     1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

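/* The LRU variant additionally inlines bpf_lru_node_set_ref(): on a
 * hit it loads lru_node.ref and stores 1 only if it was still 0, so
 * the common already-referenced case avoids dirtying the cache line.
 */
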
/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
					struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-NULL, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
						struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

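/* Note on iteration: BPF_MAP_GET_NEXT_KEY walks keys in bucket order,
 * restarting from the first bucket when the supplied key is NULL or no
 * longer present.  Concurrent updates can make a full walk skip or
 * repeat keys, so a consistent snapshot requires a quiescent map.
 */
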
static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside of slub
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;

	if (map->ops->map_fd_put_ptr) {
		void *ptr = fd_htab_map_get_ptr(map, l);

		map->ops->map_fd_put_ptr(ptr);
	}

	if (htab_is_prealloc(htab)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}

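/* In the onallcpus case the value buffer comes from the bpf() syscall
 * and is laid out as one slot per possible CPU, each slot padded to
 * round_up(value_size, 8) bytes, the same layout that
 * bpf_percpu_hash_copy() below produces when reading a value back.
 */
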
static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
	       BITS_PER_LONG == 64;
}

static u32 htab_size_value(const struct bpf_htab *htab, bool percpu)
{
	u32 size = htab->map.value_size;

	if (percpu || fd_htab_map_needs_adjust(htab))
		size = round_up(size, 8);

	return size;
}

static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{
	u32 size = htab_size_value(htab, percpu);
	bool prealloc = htab_is_prealloc(htab);
	struct htab_elem *l_new, **pl_new;
	void __percpu *pptr;

	if (prealloc) {
		if (old_elem) {
			/* if we're updating the existing element,
			 * use per-cpu extra elems to avoid freelist_pop/push
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			*pl_new = old_elem;
		} else {
			struct pcpu_freelist_node *l;

			l = pcpu_freelist_pop(&htab->freelist);
			if (!l)
				return ERR_PTR(-E2BIG);
			l_new = container_of(l, struct htab_elem, fnode);
		}
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries)
			if (!old_elem) {
				/* when map is full and update() is replacing
				 * old element, it's ok to allocate, since
				 * old element will be freed immediately.
				 * Otherwise return an error
				 */
				atomic_dec(&htab->count);
				return ERR_PTR(-E2BIG);
			}
		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
				     htab->map.numa_node);
		if (!l_new)
			return ERR_PTR(-ENOMEM);
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				return ERR_PTR(-ENOMEM);
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

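/* Flag semantics, for reference: BPF_ANY (0) inserts or overwrites,
 * BPF_NOEXIST (1) only inserts (fails with -EEXIST if the key is
 * present), BPF_EXIST (2) only overwrites (fails with -ENOENT if the
 * key is absent).  The "map_flags > BPF_EXIST" checks in the update
 * paths below reject any other value with -EINVAL.
 */
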
/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		if (!htab_is_prealloc(htab))
			free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}

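/* On failure the unused l_new goes back to the LRU free list; on a
 * successful overwrite the replaced l_old does.  The replaced element
 * is never kfree()d here because LRU maps are always fully
 * preallocated, so recycling through bpf_lru_push_free() is the only
 * release path.
 */
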
static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, NULL);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
	 */
	rcu_barrier();
	if (!htab_is_prealloc(htab))
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

const struct bpf_map_ops htab_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	if (htab_is_lru(htab))
		bpf_lru_node_set_ref(&l->lru_node);
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

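/* For illustration: a userspace reader of a percpu hash is expected to
 * supply a value buffer of num_possible_cpus() * round_up(value_size, 8)
 * bytes; the loop above fills exactly one rounded slot per possible CPU.
 */
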
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

const struct bpf_map_ops htab_percpu_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return htab_map_alloc(attr);
}

static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}

static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}

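/* Same inlining trick as htab_map_gen_lookup(), plus one extra
 * BPF_LDX_MEM(BPF_DW, ...) that dereferences the stored 8-byte pointer,
 * so the program receives the inner bpf_map pointer itself, mirroring
 * htab_of_map_lookup_elem() above.
 */
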
static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
};