/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may contain entries
 * linking to the same bucket of the old table during resizing.
 * This simplifies the locking: holding the bucket lock in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return (hash >> HASH_RESERVED_SPACE) & (tbl->size - 1);
}

static u32 key_hashfn(struct rhashtable *ht, const struct bucket_table *tbl,
		      const void *key)
{
	return rht_bucket_index(tbl, ht->p.hashfn(key, ht->p.key_len,
						  tbl->hash_rnd));
}

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	const char *ptr = rht_obj(ht, he);

	return likely(ht->p.key_len) ?
	       key_hashfn(ht, tbl, ptr + ht->p.key_offset) :
	       rht_bucket_index(tbl, ht->p.obj_hashfn(ptr, tbl->hash_rnd));
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
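
/* Worked example of the two watermarks above: with new_size = 64,
 * rht_grow_above_75() fires once nelems exceeds 64 / 4 * 3 = 48 entries,
 * and rht_shrink_below_30() fires once nelems drops below
 * 64 * 3 / 10 = 19 entries (integer division truncates 19.2 to 19).
 */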

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *new_tbl = rht_dereference(ht->future_tbl, ht);
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, RHT_LOCK_NESTED);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	spin_unlock_bh(old_bucket_lock);
}

static void rhashtable_rehash(struct rhashtable *ht,
			      struct bucket_table *new_tbl)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned old_hash;

	get_random_bytes(&new_tbl->hash_rnd, sizeof(new_tbl->hash_rnd));

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table is picked up,
	 * so no new additions go into the old table while we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	synchronize_rcu();

	bucket_table_free(old_tbl);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	new_tbl->hash_rnd = old_tbl->hash_rnd;

	atomic_inc(&ht->shift);

	rhashtable_rehash(ht, new_tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	new_tbl->hash_rnd = tbl->hash_rnd;

	atomic_dec(&ht->shift);

	rhashtable_rehash(ht, new_tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
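
/* A minimal sketch of resizing explicitly instead of relying on the
 * deferred worker ("my_ht" is a hypothetical table set up with
 * rhashtable_init()); holding ht->mutex serialises against other
 * resizers, as required by the comments above:
 *
 *	int err;
 *
 *	mutex_lock(&my_ht.mutex);
 *	err = rhashtable_expand(&my_ht);
 *	mutex_unlock(&my_ht.mutex);
 *	if (err == -ENOMEM)
 *		[... fall back or retry later ...]
 */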

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	struct rhashtable_walker *walker;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	list_for_each_entry(walker, &ht->walkers, list)
		walker->resize = true;

	if (rht_grow_above_75(ht, tbl->size))
		rhashtable_expand(ht);
	else if (rht_shrink_below_30(ht, tbl->size))
		rhashtable_shrink(ht);

unlock:
	mutex_unlock(&ht->mutex);
}

static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				bool (*compare)(void *, void *), void *arg)
{
	struct bucket_table *tbl, *old_tbl;
	struct rhash_head *head;
	bool no_resize_running;
	unsigned hash;
	bool success = true;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = head_hashfn(ht, old_tbl, obj);

	spin_lock_bh(bucket_lock(old_tbl, hash));

	/* Because we have already taken the bucket lock in old_tbl,
	 * if we find that future_tbl is not yet visible then that
	 * guarantees all other insertions of the same entry will
	 * also grab the bucket lock in old_tbl because until the
	 * rehash completes ht->tbl won't be changed.
	 */
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	if (tbl != old_tbl) {
		hash = head_hashfn(ht, tbl, obj);
		spin_lock_nested(bucket_lock(tbl, hash), RHT_LOCK_NESTED);
	}

	if (compare &&
	    rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	no_resize_running = tbl == old_tbl;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (no_resize_running && rht_grow_above_75(ht, tbl->size))
		schedule_work(&ht->run_work);

exit:
	if (tbl != old_tbl) {
		hash = head_hashfn(ht, tbl, obj);
		spin_unlock(bucket_lock(tbl, hash));
	}

	hash = head_hashfn(ht, old_tbl, obj);
	spin_unlock_bh(bucket_lock(old_tbl, hash));

	rcu_read_unlock();

	return success;
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the number of
 * entries grows above the 75% watermark (see rht_grow_above_75()).
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	__rhashtable_insert(ht, obj, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
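
/* A minimal usage sketch, assuming the "struct test_obj" layout and
 * params from the rhashtable_init() examples below and a hypothetical
 * table "my_ht":
 *
 *	struct test_obj *obj;
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	if (!obj)
 *		return -ENOMEM;
 *
 *	obj->key = 23;
 *	rhashtable_insert(&my_ht, &obj->node);
 */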

static bool __rhashtable_remove(struct rhashtable *ht,
				struct bucket_table *tbl,
				struct rhash_head *obj)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned hash;
	bool ret = false;

	hash = head_hashfn(ht, tbl, obj);
	lock = bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		ret = true;
		break;
	}

	spin_unlock_bh(lock);

	return ret;
}

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * number of entries drops below the 30% watermark (see
 * rht_shrink_below_30()).
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *old_tbl;
	bool ret;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	ret = __rhashtable_remove(ht, old_tbl, obj);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * old_tbl if it exists.
	 */
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	if (!ret && old_tbl != tbl)
		ret = __rhashtable_remove(ht, tbl, obj);

	if (ret) {
		bool no_resize_running = tbl == old_tbl;

		atomic_dec(&ht->nelems);
		if (no_resize_running && rht_shrink_below_30(ht, tbl->size))
			schedule_work(&ht->run_work);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
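
/* A minimal lookup sketch for the fixed length key case ("my_ht" and
 * "struct test_obj" as in the examples elsewhere in this file); the
 * returned object is only guaranteed to stay alive inside the RCU
 * read-side critical section:
 *
 *	int key = 23;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_ht, &key);
 *	if (obj)
 *		pr_info("found %d\n", obj->key);
 *	rcu_read_unlock();
 */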

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = key_hashfn(ht, tbl, key);
restart:
	rht_for_each_rcu(he, tbl, hash) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	old_tbl = tbl;
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	if (unlikely(tbl != old_tbl))
		goto restart;
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
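
/* A minimal sketch of a caller-supplied compare function (hypothetical;
 * useful when a plain memcmp() on the key is not appropriate). It must
 * return true on match:
 *
 *	static bool my_compare(void *ptr, void *arg)
 *	{
 *		const struct test_obj *obj = ptr;
 *		const int *key = arg;
 *
 *		return obj->key == *key;
 *	}
 *
 *	obj = rhashtable_lookup_compare(&my_ht, &key, my_compare, &key);
 */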

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the number of
 * entries grows above the 75% watermark (see rht_grow_above_75()).
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);

/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the number of
 * entries grows above the 75% watermark (see rht_grow_above_75()).
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	BUG_ON(!ht->p.key_len);

	return __rhashtable_insert(ht, obj, compare, arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	INIT_LIST_HEAD(&iter->walker->list);
	iter->walker->resize = false;

	mutex_lock(&ht->mutex);
	list_add(&iter->walker->list, &ht->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
	rcu_read_lock();

	if (iter->walker->resize) {
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns ERR_PTR(-EAGAIN) if a resize event occurred. Note that the
 * iterator will rewind back to the beginning and you may continue to
 * use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	const struct bucket_table *tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	tbl = rht_dereference_rcu(ht->tbl, ht);

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

out:
	if (iter->walker->resize) {
		iter->p = NULL;
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return ERR_PTR(-EAGAIN);
	}

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
	rcu_read_unlock();
	iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
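
/* A minimal walk sketch over a hypothetical table "my_ht" holding
 * "struct test_obj" entries. Both rhashtable_walk_start() and
 * rhashtable_walk_next() signal a resize with -EAGAIN; the iterator
 * has then been rewound and the walk can simply continue:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&my_ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("key %d\n", obj->key);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */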

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));
	INIT_LIST_HEAD(&ht->walkers);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
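
/* A minimal end-to-end sketch using Configuration Example 1 above
 * ("my_ht", "obj", "found" and the surrounding error handling are
 * hypothetical):
 *
 *	static struct rhashtable my_ht;
 *	int key = 23;
 *
 *	err = rhashtable_init(&my_ht, &params);
 *	if (err)
 *		return err;
 *
 *	obj->key = key;
 *	rhashtable_insert(&my_ht, &obj->node);
 *
 *	found = rhashtable_lookup(&my_ht, &key);
 *
 *	rhashtable_remove(&my_ht, &found->node);
 *	rhashtable_destroy(&my_ht);
 */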

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);