rhashtable.c

/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
	RHT_LOCK_NESTED2,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
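
/* Illustrative example (not part of the original source): with
 * tbl->locks_mask == 7, hash values 3, 11 and 19 all select
 * &tbl->locks[3], so mutations on those buckets serialize on one
 * spinlock, while buckets mapping to other locks proceed in parallel.
 */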

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(TBL, HASH) \
	BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#endif

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	u32 hash;

	hash = ht->p.hashfn(key, len, ht->p.hash_rnd);
	hash >>= HASH_RESERVED_SPACE;

	return rht_bucket_index(tbl, hash);
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than one lock per bucket */
	size = min_t(unsigned int, size, tbl->size);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
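
/* Illustrative arithmetic (not part of the original source): with 8
 * possible CPUs and the default locks_mul of 128 (BUCKET_LOCKS_PER_CPU),
 * size becomes roundup_pow_of_two(8 * 128) = 1024 and is then capped at
 * tbl->size, so a 256-bucket table gets 256 locks and locks_mask == 255.
 */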

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
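
/* Illustrative arithmetic (not part of the original source): for a
 * 64-bucket table, rht_grow_above_75() returns true once nelems exceeds
 * 64 / 4 * 3 = 48, and rht_shrink_below_30() returns true once nelems
 * drops below 64 * 3 / 10 = 19.
 */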

static void hashtable_chain_unzip(const struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  size_t old_hash)
{
	struct rhash_head *he, *p, *next;
	spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return;

	new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same bucket as the
	 * previous node; call that previous node p.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	/* If we have encountered an entry that maps to a different bucket in
	 * the new table, lock down that bucket as well as we might cut off
	 * the end of the chain.
	 */
	new_bucket_lock2 = bucket_lock(new_tbl, new_hash2);
	if (new_bucket_lock != new_bucket_lock2)
		spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);

	/* Find the subsequent node which does hash to the same
	 * bucket as node P, or NULL if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket
	 */
	rcu_assign_pointer(p->next, next);

	if (new_bucket_lock != new_bucket_lock2)
		spin_unlock_bh(new_bucket_lock2);
	spin_unlock_bh(new_bucket_lock);
}

static void link_old_to_new(struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	spinlock_t *new_bucket_lock;

	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
	spin_unlock_bh(new_bucket_lock);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	spinlock_t *old_bucket_lock;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
		return 0;

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	ht->shift++;

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table is picked up,
	 * so no new additions go into the old table while we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * newly formed bucket chain (containing entries added to future
	 * table) to that entry. Since all the entries which will end up in
	 * the new bucket appear in the same old bucket, this constructs an
	 * entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		old_bucket_lock = bucket_lock(old_tbl, old_hash);

		spin_lock_bh(old_bucket_lock);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(new_tbl, new_hash, he);
				break;
			}
		}
		spin_unlock_bh(old_bucket_lock);
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			struct rhash_head *head;

			old_bucket_lock = bucket_lock(old_tbl, old_hash);
			spin_lock_bh(old_bucket_lock);

			hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
			head = rht_dereference_bucket(old_tbl->buckets[old_hash],
						      old_tbl, old_hash);
			if (!rht_is_a_nulls(head))
				complete = false;

			spin_unlock_bh(old_bucket_lock);
		}
	}

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	if (ht->shift <= ht->p.min_shift)
		return 0;

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 *
	 * As removals can occur concurrently on the old table, we need
	 * to lock down both matching buckets in the old table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_bucket_lock1 = bucket_lock(tbl, new_hash);
		old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
		new_bucket_lock = bucket_lock(new_tbl, new_hash);

		spin_lock_bh(old_bucket_lock1);
		spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		spin_unlock_bh(new_bucket_lock);
		spin_unlock_bh(old_bucket_lock2);
		spin_unlock_bh(old_bucket_lock1);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	ht->shift--;

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;

	ht = container_of(work, struct rhashtable, run_work.work);
	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

	mutex_unlock(&ht->mutex);
}

static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	size_t size = tbl->size;

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl &&
	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
		schedule_delayed_work(&ht->run_work, 0);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
							 tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_wakeup_worker(ht);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	spinlock_t *lock;
	unsigned hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = head_hashfn(ht, tbl, obj);
	lock = bucket_lock(tbl, hash);

	spin_lock_bh(lock);
	__rhashtable_insert(ht, obj, tbl, hash);
	spin_unlock_bh(lock);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
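
/* Illustrative usage sketch (not part of the original source): a caller
 * embeds a struct rhash_head in its own object and passes that member to
 * rhashtable_insert(); the names below are hypothetical.
 *
 *	struct example_entry {
 *		u32			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static void example_add(struct rhashtable *ht, struct example_entry *e)
 *	{
 *		rhashtable_insert(ht, &e->node);
 *	}
 */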

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = head_hashfn(ht, tbl, obj);

	lock = bucket_lock(tbl, hash);
	spin_lock_bh(lock);

restart:
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		atomic_dec(&ht->nelems);

		spin_unlock_bh(lock);

		rhashtable_wakeup_worker(ht);

		rcu_read_unlock();

		return true;
	}

	if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
		spin_unlock_bh(lock);

		tbl = rht_dereference_rcu(ht->future_tbl, ht);
		hash = head_hashfn(ht, tbl, obj);

		lock = bucket_lock(tbl, hash);
		spin_lock_bh(lock);
		goto restart;
	}

	spin_unlock_bh(lock);
	rcu_read_unlock();

	return false;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
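
/* Illustrative removal sketch (not part of the original source): the caller
 * typically looks the object up by key first and then removes it via its
 * embedded rhash_head; "example_entry" is hypothetical.
 *
 *	struct example_entry *e;
 *
 *	e = rhashtable_lookup(ht, &key);
 *	if (e)
 *		rhashtable_remove(ht, &e->node);
 */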

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
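
/* Illustrative sketch (not part of the original source): a caller can match
 * on criteria other than a plain memcmp() of the key by supplying its own
 * compare callback; the names below are hypothetical. The key pointer is
 * still used to compute the bucket hash.
 *
 *	static bool example_cmp(void *ptr, void *arg)
 *	{
 *		const struct example_entry *e = ptr;
 *		const u32 *key = arg;
 *
 *		return e->key == *key;
 *	}
 *
 *	e = rhashtable_lookup_compare(ht, &key, example_cmp, &key);
 */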

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *new_tbl, *old_tbl;
	spinlock_t *new_bucket_lock, *old_bucket_lock;
	u32 new_hash, old_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	old_hash = head_hashfn(ht, old_tbl, obj);
	old_bucket_lock = bucket_lock(old_tbl, old_hash);
	spin_lock_bh(old_bucket_lock);

	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = head_hashfn(ht, new_tbl, obj);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);
	if (unlikely(old_tbl != new_tbl))
		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	if (rhashtable_lookup(ht, rht_obj(ht, obj) + ht->p.key_offset)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	if (unlikely(old_tbl != new_tbl))
		spin_unlock_bh(new_bucket_lock);
	spin_unlock_bh(old_bucket_lock);

	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	ht->shift = ilog2(tbl->size);
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_DEFERRABLE_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
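
/* Illustrative init sketch (not part of the original source), following
 * Configuration Example 1 above; "example_ht" and "example_init" are
 * hypothetical, and the automatic resizing hooks are optional.
 *
 *	static struct rhashtable example_ht;
 *
 *	static int __init example_init(void)
 *	{
 *		struct rhashtable_params params = {
 *			.head_offset	 = offsetof(struct test_obj, node),
 *			.key_offset	 = offsetof(struct test_obj, key),
 *			.key_len	 = sizeof(int),
 *			.hashfn		 = jhash,
 *			.grow_decision	 = rht_grow_above_75,
 *			.shrink_decision = rht_shrink_below_30,
 *		};
 *
 *		return rhashtable_init(&example_ht, &params);
 *	}
 */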

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	mutex_lock(&ht->mutex);

	cancel_delayed_work(&ht->run_work);
	bucket_table_free(rht_dereference(ht->tbl, ht));

	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

/**************************************************************************
 * Self Test
 **************************************************************************/

#ifdef CONFIG_TEST_RHASHTABLE

#define TEST_HT_SIZE	8
#define TEST_ENTRIES	2048
#define TEST_PTR	((void *) 0xdeadbeef)
#define TEST_NEXPANDS	4

struct test_obj {
	void			*ptr;
	int			value;
	struct rhash_head	node;
};

static int __init test_rht_lookup(struct rhashtable *ht)
{
	unsigned int i;

	for (i = 0; i < TEST_ENTRIES * 2; i++) {
		struct test_obj *obj;
		bool expected = !(i % 2);
		u32 key = i;

		obj = rhashtable_lookup(ht, &key);

		if (expected && !obj) {
			pr_warn("Test failed: Could not find key %u\n", key);
			return -ENOENT;
		} else if (!expected && obj) {
			pr_warn("Test failed: Unexpected entry found for key %u\n",
				key);
			return -EEXIST;
		} else if (expected && obj) {
			if (obj->ptr != TEST_PTR || obj->value != i) {
				pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
					obj->ptr, TEST_PTR, obj->value, i);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void test_bucket_stats(struct rhashtable *ht, bool quiet)
{
	unsigned int cnt, rcu_cnt, i, total = 0;
	struct rhash_head *pos;
	struct test_obj *obj;
	struct bucket_table *tbl;

	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++) {
		rcu_cnt = cnt = 0;

		if (!quiet)
			pr_info(" [%#4x/%zu]", i, tbl->size);

		rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
			cnt++;
			total++;
			if (!quiet)
				pr_cont(" [%p],", obj);
		}

		rht_for_each_entry_rcu(obj, pos, tbl, i, node)
			rcu_cnt++;

		if (rcu_cnt != cnt)
			pr_warn("Test failed: Chain count mismatch %d != %d",
				cnt, rcu_cnt);

		if (!quiet)
			pr_cont("\n [%#x] first element: %p, chain length: %u\n",
				i, tbl->buckets[i], cnt);
	}

	pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d\n",
		total, atomic_read(&ht->nelems), TEST_ENTRIES);

	if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
		pr_warn("Test failed: Total count mismatch ^^^");
}

static int __init test_rhashtable(struct rhashtable *ht)
{
	struct bucket_table *tbl;
	struct test_obj *obj;
	struct rhash_head *pos, *next;
	int err;
	unsigned int i;

	/*
	 * Insertion Test:
	 * Insert TEST_ENTRIES into table with all keys even numbers
	 */
	pr_info(" Adding %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		struct test_obj *obj;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj) {
			err = -ENOMEM;
			goto error;
		}

		obj->ptr = TEST_PTR;
		obj->value = i * 2;

		rhashtable_insert(ht, &obj->node);
	}

	rcu_read_lock();
	test_bucket_stats(ht, true);
	test_rht_lookup(ht);
	rcu_read_unlock();

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info(" Table expansion iteration %u...\n", i);
		mutex_lock(&ht->mutex);
		rhashtable_expand(ht);
		mutex_unlock(&ht->mutex);

		rcu_read_lock();
		pr_info(" Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info(" Table shrinkage iteration %u...\n", i);
		mutex_lock(&ht->mutex);
		rhashtable_shrink(ht);
		mutex_unlock(&ht->mutex);

		rcu_read_lock();
		pr_info(" Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	rcu_read_lock();
	test_bucket_stats(ht, true);
	rcu_read_unlock();

	pr_info(" Deleting %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		u32 key = i * 2;

		obj = rhashtable_lookup(ht, &key);
		BUG_ON(!obj);

		rhashtable_remove(ht, &obj->node);
		kfree(obj);
	}

	return 0;

error:
	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
			kfree(obj);

	return err;
}

static int __init test_rht_init(void)
{
	struct rhashtable ht;
	struct rhashtable_params params = {
		.nelem_hint = TEST_HT_SIZE,
		.head_offset = offsetof(struct test_obj, node),
		.key_offset = offsetof(struct test_obj, value),
		.key_len = sizeof(int),
		.hashfn = jhash,
		.nulls_base = (3U << RHT_BASE_SHIFT),
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};
	int err;

	pr_info("Running resizable hashtable tests...\n");

	err = rhashtable_init(&ht, &params);
	if (err < 0) {
		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
			err);
		return err;
	}

	err = test_rhashtable(&ht);

	rhashtable_destroy(&ht);

	return err;
}

subsys_initcall(test_rht_init);

#endif /* CONFIG_TEST_RHASHTABLE */