rhashtable.c

/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
	RHT_LOCK_NESTED2,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(TBL, HASH) \
	BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#endif
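
/* Translate a hash chain node back to the start of the enclosing object. */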
static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}
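
/* Compute the object's full hash, via obj_hashfn when no fixed key length is
 * configured, and drop the low bits reserved for the nulls marker.
 */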
static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	u32 hash;

	hash = ht->p.hashfn(key, len, ht->p.hash_rnd);
	hash >>= HASH_RESERVED_SPACE;

	return rht_bucket_index(tbl, hash);
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}
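
/* Return a pointer to the next pointer of the last entry in bucket @n,
 * i.e. the location where a new tail entry can be linked in.
 */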
static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}
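
/* Allocate and initialize the per-bucket lock array. The number of locks is
 * derived from the CPU count and locks_mul, capped at one lock per bucket.
 */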
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than one lock per bucket */
	size = min_t(unsigned int, size, tbl->size);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
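
/* Detach from the old bucket chain the leading run of entries that all map
 * to one bucket in the new table (they remain reachable via the new table),
 * then relink the remainder of the chain so it skips entries belonging to
 * other new buckets. Repeated passes, separated by RCU grace periods, fully
 * unzip the interleaved chains created by rhashtable_expand().
 */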
static void hashtable_chain_unzip(const struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  size_t old_hash)
{
	struct rhash_head *he, *p, *next;
	spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return;

	new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	/* Advance p one or more times until it reaches the last node that
	 * hashes to the same bucket in the new table as the old bucket head.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	/* If we have encountered an entry that maps to a different bucket in
	 * the new table, lock down that bucket as well as we might cut off
	 * the end of the chain.
	 */
	new_bucket_lock2 = bucket_lock(new_tbl, new_hash);
	if (new_bucket_lock != new_bucket_lock2)
		spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);

	/* Find the subsequent node which does hash to the same
	 * bucket as node P, or NULL if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	rcu_assign_pointer(p->next, next);

	if (new_bucket_lock != new_bucket_lock2)
		spin_unlock_bh(new_bucket_lock2);
	spin_unlock_bh(new_bucket_lock);
}
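
/* Append @entry from the old table to the tail of its bucket in the new
 * table, taking the new bucket lock nested under the old bucket lock held
 * by the caller.
 */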
static void link_old_to_new(struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	spinlock_t *new_bucket_lock;

	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
	spin_unlock_bh(new_bucket_lock);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	spinlock_t *old_bucket_lock;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table has been picked
	 * up, so no new additions go into the old table while we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * the newly formed bucket chain (containing entries added to the
	 * future table) to that entry. Since all the entries which will end
	 * up in the new bucket appear in the same old bucket, this constructs
	 * an entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		old_bucket_lock = bucket_lock(old_tbl, old_hash);

		spin_lock_bh(old_bucket_lock);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(new_tbl, new_hash, he);
				break;
			}
		}
		spin_unlock_bh(old_bucket_lock);
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			struct rhash_head *head;

			old_bucket_lock = bucket_lock(old_tbl, old_hash);
			spin_lock_bh(old_bucket_lock);

			hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
			head = rht_dereference_bucket(old_tbl->buckets[old_hash],
						      old_tbl, old_hash);
			if (!rht_is_a_nulls(head))
				complete = false;

			spin_unlock_bh(old_bucket_lock);
		}
	}

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 *
	 * As removals can occur concurrently on the old table, we need
	 * to lock down both matching buckets in the old table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_bucket_lock1 = bucket_lock(tbl, new_hash);
		old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
		new_bucket_lock = bucket_lock(new_tbl, new_hash);

		spin_lock_bh(old_bucket_lock1);
		spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		spin_unlock_bh(new_bucket_lock);
		spin_unlock_bh(old_bucket_lock2);
		spin_unlock_bh(old_bucket_lock1);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
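
/* Deferred worker: grows or shrinks the table asynchronously, serialized
 * against other resizes by ht->mutex.
 */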
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;

	ht = container_of(work, struct rhashtable, run_work.work);
	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

	mutex_unlock(&ht->mutex);
}
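
/* Schedule the deferred resize worker if no resize is currently in flight
 * and a grow or shrink watermark has been crossed.
 */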
static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	size_t size = tbl->size;

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl &&
	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
		schedule_delayed_work(&ht->run_work, 0);
}
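
/* Link @obj at the head of its bucket chain, bump the element count and
 * kick the deferred resize worker if needed. The caller must hold the
 * bucket lock for @hash.
 */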
static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
							 tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_wakeup_worker(ht);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	spinlock_t *lock;
	unsigned hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = head_hashfn(ht, tbl, obj);
	lock = bucket_lock(tbl, hash);

	spin_lock_bh(lock);
	__rhashtable_insert(ht, obj, tbl, hash);
	spin_unlock_bh(lock);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = head_hashfn(ht, tbl, obj);

	lock = bucket_lock(tbl, hash);
	spin_lock_bh(lock);

restart:
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		atomic_dec(&ht->nelems);

		spin_unlock_bh(lock);

		rhashtable_wakeup_worker(ht);

		rcu_read_unlock();

		return true;
	}

	if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
		spin_unlock_bh(lock);

		tbl = rht_dereference_rcu(ht->future_tbl, ht);
		hash = head_hashfn(ht, tbl, obj);

		lock = bucket_lock(tbl, hash);
		spin_lock_bh(lock);
		goto restart;
	}

	spin_unlock_bh(lock);
	rcu_read_unlock();

	return false;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};
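
/* Default compare function: memcmp() of the fixed length key. */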
static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);

/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	struct bucket_table *new_tbl, *old_tbl;
	spinlock_t *new_bucket_lock, *old_bucket_lock;
	u32 new_hash, old_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	old_hash = head_hashfn(ht, old_tbl, obj);
	old_bucket_lock = bucket_lock(old_tbl, old_hash);
	spin_lock_bh(old_bucket_lock);

	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = head_hashfn(ht, new_tbl, obj);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);
	if (unlikely(old_tbl != new_tbl))
		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	if (unlikely(old_tbl != new_tbl))
		spin_unlock_bh(new_bucket_lock);
	spin_unlock_bh(old_bucket_lock);

	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
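
/* Initial table size: the element hint scaled to stay below the 75% grow
 * watermark, rounded up to a power of two and clamped to min_shift.
 */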
static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_DEFERRABLE_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	mutex_lock(&ht->mutex);

	cancel_delayed_work(&ht->run_work);
	bucket_table_free(rht_dereference(ht->tbl, ht));

	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

/**************************************************************************
 * Self Test
 **************************************************************************/

#ifdef CONFIG_TEST_RHASHTABLE

#define TEST_HT_SIZE	8
#define TEST_ENTRIES	2048
#define TEST_PTR	((void *) 0xdeadbeef)
#define TEST_NEXPANDS	4

struct test_obj {
	void			*ptr;
	int			value;
	struct rhash_head	node;
};

static int __init test_rht_lookup(struct rhashtable *ht)
{
	unsigned int i;

	for (i = 0; i < TEST_ENTRIES * 2; i++) {
		struct test_obj *obj;
		bool expected = !(i % 2);
		u32 key = i;

		obj = rhashtable_lookup(ht, &key);

		if (expected && !obj) {
			pr_warn("Test failed: Could not find key %u\n", key);
			return -ENOENT;
		} else if (!expected && obj) {
			pr_warn("Test failed: Unexpected entry found for key %u\n",
				key);
			return -EEXIST;
		} else if (expected && obj) {
			if (obj->ptr != TEST_PTR || obj->value != i) {
				pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
					obj->ptr, TEST_PTR, obj->value, i);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void test_bucket_stats(struct rhashtable *ht, bool quiet)
{
	unsigned int cnt, rcu_cnt, i, total = 0;
	struct rhash_head *pos;
	struct test_obj *obj;
	struct bucket_table *tbl;

	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++) {
		rcu_cnt = cnt = 0;

		if (!quiet)
			pr_info(" [%#4x/%zu]", i, tbl->size);

		rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
			cnt++;
			total++;
			if (!quiet)
				pr_cont(" [%p],", obj);
		}

		rht_for_each_entry_rcu(obj, pos, tbl, i, node)
			rcu_cnt++;

		if (rcu_cnt != cnt)
			pr_warn("Test failed: Chain count mismatch %d != %d",
				cnt, rcu_cnt);

		if (!quiet)
			pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
				i, tbl->buckets[i], cnt);
	}

	pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d\n",
		total, atomic_read(&ht->nelems), TEST_ENTRIES);

	if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
		pr_warn("Test failed: Total count mismatch ^^^");
}

static int __init test_rhashtable(struct rhashtable *ht)
{
	struct bucket_table *tbl;
	struct test_obj *obj;
	struct rhash_head *pos, *next;
	int err;
	unsigned int i;

	/*
	 * Insertion Test:
	 * Insert TEST_ENTRIES into table with all keys even numbers
	 */
	pr_info("  Adding %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		struct test_obj *obj;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj) {
			err = -ENOMEM;
			goto error;
		}

		obj->ptr = TEST_PTR;
		obj->value = i * 2;

		rhashtable_insert(ht, &obj->node);
	}

	rcu_read_lock();
	test_bucket_stats(ht, true);
	test_rht_lookup(ht);
	rcu_read_unlock();

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info("  Table expansion iteration %u...\n", i);
		mutex_lock(&ht->mutex);
		rhashtable_expand(ht);
		mutex_unlock(&ht->mutex);

		rcu_read_lock();
		pr_info("  Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info("  Table shrinkage iteration %u...\n", i);
		mutex_lock(&ht->mutex);
		rhashtable_shrink(ht);
		mutex_unlock(&ht->mutex);

		rcu_read_lock();
		pr_info("  Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	rcu_read_lock();
	test_bucket_stats(ht, true);
	rcu_read_unlock();

	pr_info("  Deleting %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		u32 key = i * 2;

		obj = rhashtable_lookup(ht, &key);
		BUG_ON(!obj);

		rhashtable_remove(ht, &obj->node);
		kfree(obj);
	}

	return 0;

error:
	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
			kfree(obj);

	return err;
}

static int __init test_rht_init(void)
{
	struct rhashtable ht;
	struct rhashtable_params params = {
		.nelem_hint = TEST_HT_SIZE,
		.head_offset = offsetof(struct test_obj, node),
		.key_offset = offsetof(struct test_obj, value),
		.key_len = sizeof(int),
		.hashfn = jhash,
		.nulls_base = (3U << RHT_BASE_SHIFT),
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};
	int err;

	pr_info("Running resizable hashtable tests...\n");

	err = rhashtable_init(&ht, &params);
	if (err < 0) {
		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
			err);
		return err;
	}

	err = test_rhashtable(&ht);

	rhashtable_destroy(&ht);

	return err;
}

subsys_initcall(test_rht_init);

#endif /* CONFIG_TEST_RHASHTABLE */