/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"

#define HTAB_CREATE_FLAG_MASK \
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE)

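/* Each bucket carries its own raw spinlock, so updates that hash to
 * different buckets can proceed in parallel; only writers targeting the
 * same bucket contend.
 */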
struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	struct htab_elem *__percpu *extra_elems;
	atomic_t count;		/* number of elements in this hashtable */
	u32 n_buckets;		/* number of hash buckets */
	u32 elem_size;		/* size of each element in bytes */
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct bpf_htab *htab;
				struct pcpu_freelist_node fnode;
			};
		};
	};
	union {
		struct rcu_head rcu;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_prealloc(const struct bpf_htab *htab)
{
	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

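/* For per-cpu maps the slot right after the key does not hold the value
 * itself but a pointer to the element's per-cpu value area; these two
 * helpers store and load that pointer.
 */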
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
	}
free_elems:
	bpf_map_area_free(htab->elems);
}

static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

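/* Pre-allocate every element up front and hand them either to the LRU
 * lists or to the per-cpu freelist, so that map updates never have to
 * allocate memory at runtime.
 */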
static int prealloc_init(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int err = -ENOMEM, i;

	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
		num_entries += num_possible_cpus();

	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
					 htab->map.numa_node);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < num_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

static int alloc_extra_elems(struct bpf_htab *htab)
{
	struct htab_elem *__percpu *pptr, *l_new;
	struct pcpu_freelist_node *l;
	int cpu;

	pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
				  GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		l = pcpu_freelist_pop(&htab->freelist);
		/* pop will succeed, since prealloc_init()
		 * preallocated extra num_possible_cpus elements
		 */
		l_new = container_of(l, struct htab_elem, fnode);
		*per_cpu_ptr(pptr, cpu) = l_new;
	}
	htab->extra_elems = pptr;
	return 0;
}

/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu. percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
		     offsetof(struct htab_elem, hash_node.pprev));
	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (lru && !capable(CAP_SYS_ADMIN))
		/* LRU implementation is much more complicated than other
		 * maps. Hence, limit to CAP_SYS_ADMIN for now.
		 */
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (!lru && percpu_lru)
		return ERR_PTR(-EINVAL);

	if (lru && !prealloc)
		return ERR_PTR(-ENOTSUPP);

	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
		return ERR_PTR(-EINVAL);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.map_type = attr->map_type;
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;
	htab->map.map_flags = attr->map_flags;
	htab->map.numa_node = numa_node;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	if (percpu_lru) {
		/* ensure each CPU's lru list has >=1 elements.
		 * since we are at it, make each lru list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
		/* make sure the size for pcpu_alloc() is reasonable */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket),
					   htab->map.numa_node);
	if (!htab->buckets)
		goto free_htab;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_buckets;

		if (!percpu && !lru) {
			/* lru itself can remove the least used element, so
			 * there is no need for an extra elem during map_update.
			 */
			err = alloc_extra_elems(htab);
			if (err)
				goto free_prealloc;
		}
	}

	return &htab->map;

free_prealloc:
	prealloc_destroy(htab);
free_buckets:
	bpf_map_area_free(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

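/* n_buckets is always a power of two, so masking the jhash value with
 * (n_buckets - 1) selects the bucket.
 */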
static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* can be called without bucket lock. it will repeat the loop in
 * the unlikely event when elements moved from one bucket into another
 * while the linked list is being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}

/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int ref_reg = BPF_REG_1;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
			      offsetof(struct htab_elem, lru_node) +
			      offsetof(struct bpf_lru_node, ref));
	*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
	*insn++ = BPF_ST_MEM(BPF_B, ret,
			     offsetof(struct htab_elem, lru_node) +
			     offsetof(struct bpf_lru_node, ref),
			     1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));

	return insn - insn_buf;
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
					struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-zero, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
						struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside of slub
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

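/* Release an element that has been unlinked from its bucket: pre-allocated
 * elements go back to the freelist, while kmalloc'ed ones are freed after
 * an RCU grace period via htab_elem_free_rcu().
 */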
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;

	if (map->ops->map_fd_put_ptr) {
		void *ptr = fd_htab_map_get_ptr(map, l);

		map->ops->map_fd_put_ptr(ptr);
	}

	if (htab_is_prealloc(htab)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}

static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
	       BITS_PER_LONG == 64;
}

static u32 htab_size_value(const struct bpf_htab *htab, bool percpu)
{
	u32 size = htab->map.value_size;

	if (percpu || fd_htab_map_needs_adjust(htab))
		size = round_up(size, 8);

	return size;
}

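/* Obtain a new element: reuse this CPU's extra element when replacing an
 * existing key, pop from the freelist for pre-allocated maps, or kmalloc
 * otherwise, then copy in the key and value.
 */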
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{
	u32 size = htab_size_value(htab, percpu);
	bool prealloc = htab_is_prealloc(htab);
	struct htab_elem *l_new, **pl_new;
	void __percpu *pptr;

	if (prealloc) {
		if (old_elem) {
			/* if we're updating the existing element,
			 * use per-cpu extra elems to avoid freelist_pop/push
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			*pl_new = old_elem;
		} else {
			struct pcpu_freelist_node *l;

			l = pcpu_freelist_pop(&htab->freelist);
			if (!l)
				return ERR_PTR(-E2BIG);
			l_new = container_of(l, struct htab_elem, fnode);
		}
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries)
			if (!old_elem) {
				/* when map is full and update() is replacing
				 * old element, it's ok to allocate, since
				 * old element will be freed immediately.
				 * Otherwise return an error
				 */
				atomic_dec(&htab->count);
				return ERR_PTR(-E2BIG);
			}
		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
				     htab->map.numa_node);
		if (!l_new)
			return ERR_PTR(-ENOMEM);
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				return ERR_PTR(-ENOMEM);
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		if (!htab_is_prealloc(htab))
			free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, NULL);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}

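/* Used at map teardown for non-preallocated maps: walk every bucket and
 * free each remaining element.
 */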
static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
	 */
	rcu_barrier();
	if (!htab_is_prealloc(htab))
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

const struct bpf_map_ops htab_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
	.map_gen_lookup = htab_lru_map_gen_lookup,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	if (htab_is_lru(htab))
		bpf_lru_node_set_ref(&l->lru_node);
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

const struct bpf_map_ops htab_percpu_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return htab_map_alloc(attr);
}

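/* Drop the references taken on the stored fd objects (e.g. inner maps)
 * before tearing down the hash table itself.
 */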
static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}

static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 htab_of_map_gen_lookup(struct bpf_map *map,
				  struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

	return insn - insn_buf;
}

static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
};