/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/vmalloc.h>

struct bucket {
	struct hlist_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	atomic_t count;		/* number of elements in this hashtable */
	u32 n_buckets;		/* number of hash buckets */
	u32 elem_size;		/* size of each element in bytes */
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	struct hlist_node hash_node;
	struct rcu_head rcu;
	u32 hash;
	char key[0] __aligned(8);
};

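/* Element memory layout, a sketch derived from how the update and lookup
 * paths below index into the flexible array:
 *
 *	+-----------------------+------------------------+-------+
 *	| hash_node, rcu, hash  | key, padded to 8 bytes | value |
 *	+-----------------------+------------------------+-------+
 *	                        ^ key[0]                 ^ key + round_up(key_size, 8)
 *
 * __aligned(8) on key[0] plus the round_up() keep the value region 8-byte
 * aligned regardless of key_size.
 */
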
/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	struct bpf_htab *htab;
	int err, i;

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8) +
			  htab->map.value_size;

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	if ((u64) htab->n_buckets * sizeof(struct bucket) +
	    (u64) htab->elem_size * htab->map.max_entries >=
	    U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(htab->n_buckets * sizeof(struct bucket) +
				   htab->elem_size * htab->map.max_entries,
				   PAGE_SIZE) >> PAGE_SHIFT;

	err = -ENOMEM;
	htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
				      GFP_USER | __GFP_NOWARN);

	if (!htab->buckets) {
		htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
		if (!htab->buckets)
			goto free_htab;
	}

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	atomic_set(&htab->count, 0);

	return &htab->map;

free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

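/* Illustrative only, not part of this file: a map of this type is created
 * from user space with the BPF_MAP_CREATE command of the bpf(2) syscall,
 * which is what ends up calling htab_map_alloc() above. A minimal sketch,
 * assuming a raw syscall(2) invocation:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1024,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * With max_entries = 1024 already a power of two, n_buckets stays 1024.
 */
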
static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

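/* Map a hash to a bucket. Masking with (n_buckets - 1) is equivalent to
 * hash % n_buckets only because htab_map_alloc() rounded n_buckets up to a
 * power of two.
 */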
static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct htab_elem *l;

	hlist_for_each_entry_rcu(l, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* Called from syscall or from eBPF program */
static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	head = select_bucket(htab, hash);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

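/* The non-NULL return above points at the value region of the element
 * (key + round_up(key_size, 8), matching the layout sketch near htab_elem).
 * From an eBPF program this surfaces through the bpf_map_lookup_elem()
 * helper, roughly (illustrative sketch, not part of this file; my_map is a
 * hypothetical map handle):
 *
 *	__u64 *value = bpf_map_lookup_elem(&my_map, &key);
 *	if (value)
 *		(*value)++;	// direct, in-place access under RCU
 */
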
/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_elem_raw(head, hash, key, key_size);

	if (!l) {
		i = 0;
		goto find_first_elem;
	}

	/* key was found, get next key in the same bucket */
	next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
				  struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-zero, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					  struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

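/* Illustrative only, not part of this file: user space enumerates all keys
 * by chaining BPF_MAP_GET_NEXT_KEY syscall commands until -ENOENT comes
 * back. Note the behavior above: if the passed-in key is not found, the
 * walk (re)starts from the first bucket. A sketch, with map_fd from the
 * creation example above and a hypothetical bpf_get_next_key() syscall
 * wrapper:
 *
 *	__u32 key = -1, next_key;	// assume -1 is never used as a key
 *	while (bpf_get_next_key(map_fd, &key, &next_key) == 0) {
 *		// ... process next_key ...
 *		key = next_key;
 *	}
 */
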
/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old;
	struct hlist_head *head;
	struct bucket *b;
	unsigned long flags;
	u32 key_size;
	int ret;

	if (map_flags > BPF_EXIST)
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* allocate new element outside of lock */
	l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!l_new)
		return -ENOMEM;

	key_size = map->key_size;

	memcpy(l_new->key, key, key_size);
	memcpy(l_new->key + round_up(key_size, 8), value, map->value_size);

	l_new->hash = htab_map_hash(l_new->key, key_size);
	b = __select_bucket(htab, l_new->hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, l_new->hash, key, key_size);

	if (!l_old && unlikely(atomic_read(&htab->count) >= map->max_entries)) {
		/* if elem with this 'key' doesn't exist and we've reached
		 * max_entries limit, fail insertion of new elem
		 */
		ret = -E2BIG;
		goto err;
	}

	if (l_old && map_flags == BPF_NOEXIST) {
		/* elem already exists */
		ret = -EEXIST;
		goto err;
	}

	if (!l_old && map_flags == BPF_EXIST) {
		/* elem doesn't exist, cannot update it */
		ret = -ENOENT;
		goto err;
	}

	/* add new element to the head of the list, so that concurrent
	 * search will find it before old elem
	 */
	hlist_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_del_rcu(&l_old->hash_node);
		kfree_rcu(l_old, rcu);
	} else {
		atomic_inc(&htab->count);
	}
	raw_spin_unlock_irqrestore(&b->lock, flags);

	return 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	kfree(l_new);
	return ret;
}

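/* Summary of the map_flags handling above:
 *
 *	BPF_ANY     - create the element or replace an existing one
 *	BPF_NOEXIST - create only; -EEXIST if the key is already present
 *	BPF_EXIST   - replace only; -ENOENT if the key is missing
 *
 * Replacement publishes the new element at the list head before unlinking
 * the old one, so a concurrent RCU reader always sees either the old or the
 * new value, never a torn element.
 */
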
/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_del_rcu(&l->hash_node);
		atomic_dec(&htab->count);
		kfree_rcu(l, rcu);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

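/* Unlike the per-element delete above, this bulk teardown uses plain kfree()
 * rather than kfree_rcu(): it only runs from htab_map_free(), after
 * synchronize_rcu() has guaranteed there are no readers left.
 */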
static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_head *head = select_bucket(htab, i);
		struct hlist_node *n;
		struct htab_elem *l;

		hlist_for_each_entry_safe(l, n, head, hash_node) {
			hlist_del_rcu(&l->hash_node);
			atomic_dec(&htab->count);
			kfree(l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	/* some of kfree_rcu() callbacks for elements of this map may not have
	 * executed. It's ok. Proceed to free residual elements and map itself
	 */
	delete_all_elements(htab);
	kvfree(htab->buckets);
	kfree(htab);
}

static const struct bpf_map_ops htab_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_type __read_mostly = {
	.ops = &htab_ops,
	.type = BPF_MAP_TYPE_HASH,
};

static int __init register_htab_map(void)
{
	bpf_register_map_type(&htab_type);
	return 0;
}
late_initcall(register_htab_map);