rhashtable.c

/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
};
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may contain entries
 * linking to the same bucket of the old table during resizing.
 * This simplifies the locking: taking the bucket lock in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

#ifdef CONFIG_PROVE_LOCKING
static void debug_dump_buckets(const struct rhashtable *ht,
			       const struct bucket_table *tbl)
{
	struct rhash_head *he;
	unsigned int i, hash;

	for (i = 0; i < tbl->size; i++) {
		pr_warn(" [Bucket %d] ", i);
		rht_for_each_rcu(he, tbl, i) {
			hash = head_hashfn(ht, tbl, he);
			pr_cont("[hash = %#x, lock = %p] ",
				hash, bucket_lock(tbl, hash));
		}
		pr_cont("\n");
	}
}

static void debug_dump_table(struct rhashtable *ht,
			     const struct bucket_table *tbl,
			     unsigned int hash)
{
	struct bucket_table *old_tbl, *future_tbl;

	pr_emerg("BUG: lock for hash %#x in table %p not held\n",
		 hash, tbl);

	rcu_read_lock();
	future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	if (future_tbl != old_tbl) {
		pr_warn("Future table %p (size: %zd)\n",
			future_tbl, future_tbl->size);
		debug_dump_buckets(ht, future_tbl);
	}

	pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
	debug_dump_buckets(ht, old_tbl);

	rcu_read_unlock();
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)				\
	do {								\
		if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) {	\
			debug_dump_table(HT, TBL, HASH);		\
			BUG();						\
		}							\
	} while (0)

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
#endif
static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}
/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
static void lock_buckets(struct bucket_table *new_tbl,
			 struct bucket_table *old_tbl, unsigned int hash)
	__acquires(old_bucket_lock)
{
	spin_lock_bh(bucket_lock(old_tbl, hash));
	if (new_tbl != old_tbl)
		spin_lock_bh_nested(bucket_lock(new_tbl, hash),
				    RHT_LOCK_NESTED);
}

static void unlock_buckets(struct bucket_table *new_tbl,
			   struct bucket_table *old_tbl, unsigned int hash)
	__releases(old_bucket_lock)
{
	if (new_tbl != old_tbl)
		spin_unlock_bh(bucket_lock(new_tbl, hash));
	spin_unlock_bh(bucket_lock(old_tbl, hash));
}
/**
 * Unlink entries on a bucket which hash to a different bucket.
 *
 * Returns true if more work still needs to be performed on the bucket.
 */
static bool hashtable_chain_unzip(struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  size_t old_hash)
{
	struct rhash_head *he, *p, *next;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return false;

	new_hash = head_hashfn(ht, new_tbl, p);
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same new-table bucket
	 * as the previous node p.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);

		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	/* Find the subsequent node which does hash to the same
	 * bucket as node P, or NULL if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	rcu_assign_pointer(p->next, next);

	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);

	return !rht_is_a_nulls(p);
}
static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table has been picked
	 * up, so no new additions go into the old table while we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * newly formed bucket chain (containing entries added to future
	 * table) to that entry. Since all the entries which will end up in
	 * the new bucket appear in the same old bucket, this constructs an
	 * entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		lock_buckets(new_tbl, old_tbl, new_hash);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(ht, new_tbl, new_hash, he);
				break;
			}
		}
		unlock_buckets(new_tbl, old_tbl, new_hash);
		cond_resched();
	}

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			lock_buckets(new_tbl, old_tbl, old_hash);

			if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
						  old_hash))
				complete = false;

			unlock_buckets(new_tbl, old_tbl, old_hash);
			cond_resched();
		}
	}

	rcu_assign_pointer(ht->tbl, new_tbl);
	synchronize_rcu();

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		lock_buckets(new_tbl, tbl, new_hash);

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		unlock_buckets(new_tbl, tbl, new_hash);
		cond_resched();
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
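
/*
 * Usage sketch (not part of the original file): rhashtable_expand() and
 * rhashtable_shrink() are normally driven by the deferred worker below, but
 * they may be called directly as long as ht->mutex is held to rule out
 * concurrent resizes, as documented above.  ex_force_expand() is a
 * hypothetical helper name used only for illustration.
 */
static inline int ex_force_expand(struct rhashtable *ht)
{
	int err;

	mutex_lock(&ht->mutex);
	err = rhashtable_expand(ht);
	mutex_unlock(&ht->mutex);

	return err;
}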
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	struct rhashtable_walker *walker;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	list_for_each_entry(walker, &ht->walkers, list)
		walker->resize = true;

	if (rht_grow_above_75(ht, tbl->size))
		rhashtable_expand(ht);
	else if (rht_shrink_below_30(ht, tbl->size))
		rhashtable_shrink(ht);

unlock:
	mutex_unlock(&ht->mutex);
}
static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl,
				const struct bucket_table *old_tbl, u32 hash)
{
	bool no_resize_running = tbl == old_tbl;
	struct rhash_head *head;

	hash = rht_bucket_index(tbl, hash);
	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	ASSERT_BUCKET_LOCK(ht, tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	if (no_resize_running && rht_grow_above_75(ht, tbl->size))
		schedule_work(&ht->run_work);
}
/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *old_tbl;
	unsigned hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(tbl, old_tbl, hash);
	__rhashtable_insert(ht, obj, tbl, old_tbl, hash);
	unlock_buckets(tbl, old_tbl, hash);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
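
/*
 * Usage sketch (not part of the original file): a minimal object layout for
 * a fixed length key table and an insertion wrapper.  "struct ex_obj" and
 * ex_insert() are hypothetical names; later sketches in this file reuse
 * struct ex_obj.
 */
struct ex_obj {
	u32			key;
	struct rhash_head	node;
};

static inline void ex_insert(struct rhashtable *ht, struct ex_obj *obj)
{
	/* Takes the per-bucket spinlock internally; safe in atomic context. */
	rhashtable_insert(ht, &obj->node);
}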
/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *new_tbl, *old_tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he, *he2;
	unsigned int hash, new_hash;
	bool ret = false;

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);
restart:
	hash = rht_bucket_index(tbl, new_hash);
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		ASSERT_BUCKET_LOCK(ht, tbl, hash);

		if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
		    !rht_is_a_nulls(obj->next) &&
		    head_hashfn(ht, tbl, obj->next) != hash) {
			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
			rht_for_each_continue(he2, obj->next, tbl, hash) {
				if (head_hashfn(ht, tbl, he2) == hash) {
					rcu_assign_pointer(*pprev, he2);
					goto found;
				}
			}

			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else {
			rcu_assign_pointer(*pprev, obj->next);
		}

found:
		ret = true;
		break;
	}

	/* The entry may be linked in either 'tbl', 'future_tbl', or both.
	 * 'future_tbl' only exists for a short period of time during
	 * resizing. Thus traversing both is fine and the added cost is
	 * very rare.
	 */
	if (tbl != old_tbl) {
		tbl = old_tbl;
		goto restart;
	}

	unlock_buckets(new_tbl, old_tbl, new_hash);

	if (ret) {
		bool no_resize_running = new_tbl == old_tbl;

		atomic_dec(&ht->nelems);
		if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
			schedule_work(&ht->run_work);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
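
/*
 * Usage sketch (not part of the original file): removal takes the object
 * itself rather than the key, since chains are singly linked.  ex_remove()
 * is a hypothetical helper; the object must only be freed after an RCU grace
 * period (e.g. via kfree_rcu()) because concurrent readers may still hold
 * references to it.
 */
static inline bool ex_remove(struct rhashtable *ht, struct ex_obj *obj)
{
	return rhashtable_remove(ht, &obj->node);
}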
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}
/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for a fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
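
/*
 * Usage sketch (not part of the original file): rhashtable_lookup() returns
 * the enclosing object (head_offset is subtracted internally) or NULL.  The
 * returned object is only guaranteed to stay around while the caller holds
 * rcu_read_lock(), assuming objects are freed after a grace period.
 * ex_key_present() is a hypothetical helper.
 */
static inline bool ex_key_present(struct rhashtable *ht, u32 key)
{
	struct ex_obj *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, &key);
	/* ... use obj here while still under rcu_read_lock() ... */
	rcu_read_unlock();

	return obj != NULL;
}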
/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
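
/*
 * Usage sketch (not part of the original file): rhashtable_lookup_insert()
 * makes the "is the key already there?" check and the insertion atomic with
 * respect to other writers on the same bucket lock.  ex_insert_unique() is a
 * hypothetical helper that returns -EEXIST when a duplicate key is found.
 */
static inline int ex_insert_unique(struct rhashtable *ht, struct ex_obj *obj)
{
	return rhashtable_lookup_insert(ht, &obj->node) ? 0 : -EEXIST;
}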
/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	struct bucket_table *new_tbl, *old_tbl;
	u32 new_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);

	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);

exit:
	unlock_buckets(new_tbl, old_tbl, new_hash);
	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	INIT_LIST_HEAD(&iter->walker->list);
	iter->walker->resize = false;

	mutex_lock(&ht->mutex);
	list_add(&iter->walker->list, &ht->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
	rcu_read_lock();

	if (iter->walker->resize) {
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	const struct bucket_table *tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	tbl = rht_dereference_rcu(ht->tbl, ht);

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

out:
	if (iter->walker->resize) {
		iter->p = NULL;
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return ERR_PTR(-EAGAIN);
	}

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
	rcu_read_unlock();
	iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
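
/*
 * Usage sketch (not part of the original file): a full iteration following
 * the init/start/next/stop/exit protocol, including the -EAGAIN rewinds that
 * a concurrent resize can trigger.  ex_walk() is a hypothetical helper and,
 * as documented above, may visit an object twice after a rewind.
 */
static inline int ex_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct ex_obj *obj;
	int err;

	err = rhashtable_walk_init(ht, &iter);
	if (err)
		return err;

	err = rhashtable_walk_start(&iter);
	if (err == -EAGAIN)
		err = 0;	/* iterator was rewound, keep going */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resize event, rewound */
			err = PTR_ERR(obj);
			break;
		}
		/* ... use obj ... */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);

	return err;
}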
static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));
	INIT_LIST_HEAD(&ht->walkers);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
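
/*
 * Usage sketch (not part of the original file): table lifecycle using the
 * fixed length key layout of struct ex_obj above.  ex_table_setup() and the
 * parameter values are hypothetical; rhashtable_destroy() only frees the
 * bucket array, so all objects must be removed (or known to be unreachable)
 * before tearing the table down.
 */
static inline int ex_table_setup(struct rhashtable *ht)
{
	struct rhashtable_params params = {
		.head_offset	= offsetof(struct ex_obj, node),
		.key_offset	= offsetof(struct ex_obj, key),
		.key_len	= sizeof(u32),
		.hashfn		= jhash,
		.nulls_base	= (1U << RHT_BASE_SHIFT),
	};

	return rhashtable_init(ht, &params);
}

static inline void ex_table_teardown(struct rhashtable *ht)
{
	rhashtable_destroy(ht);
}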