rhashtable.c

/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
};
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as taking the bucket lock in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and the new
 * table during expansion and shrinking, the old bucket lock must always
 * be acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

#ifdef CONFIG_PROVE_LOCKING
static void debug_dump_buckets(const struct rhashtable *ht,
			       const struct bucket_table *tbl)
{
	struct rhash_head *he;
	unsigned int i, hash;

	for (i = 0; i < tbl->size; i++) {
		pr_warn(" [Bucket %d] ", i);
		rht_for_each_rcu(he, tbl, i) {
			hash = head_hashfn(ht, tbl, he);
			pr_cont("[hash = %#x, lock = %p] ",
				hash, bucket_lock(tbl, hash));
		}
		pr_cont("\n");
	}
}

static void debug_dump_table(struct rhashtable *ht,
			     const struct bucket_table *tbl,
			     unsigned int hash)
{
	struct bucket_table *old_tbl, *future_tbl;

	pr_emerg("BUG: lock for hash %#x in table %p not held\n",
		 hash, tbl);

	rcu_read_lock();
	future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	if (future_tbl != old_tbl) {
		pr_warn("Future table %p (size: %zd)\n",
			future_tbl, future_tbl->size);
		debug_dump_buckets(ht, future_tbl);
	}

	pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
	debug_dump_buckets(ht, old_tbl);

	rcu_read_unlock();
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)				\
	do {								\
		if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) {	\
			debug_dump_table(HT, TBL, HASH);		\
			BUG();						\
		}							\
	} while (0)

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
#endif

static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @new_size: new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @new_size: new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);

static void lock_buckets(struct bucket_table *new_tbl,
			 struct bucket_table *old_tbl, unsigned int hash)
	__acquires(old_bucket_lock)
{
	spin_lock_bh(bucket_lock(old_tbl, hash));
	if (new_tbl != old_tbl)
		spin_lock_bh_nested(bucket_lock(new_tbl, hash),
				    RHT_LOCK_NESTED);
}

static void unlock_buckets(struct bucket_table *new_tbl,
			   struct bucket_table *old_tbl, unsigned int hash)
	__releases(old_bucket_lock)
{
	if (new_tbl != old_tbl)
		spin_unlock_bh(bucket_lock(new_tbl, hash));
	spin_unlock_bh(bucket_lock(old_tbl, hash));
}
/**
 * Unlink entries from the old bucket which hash to a different bucket
 * in the new table.
 *
 * Returns true if further unzip passes are still required on this bucket.
 */
static bool hashtable_chain_unzip(struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  size_t old_hash)
{
	struct rhash_head *he, *p, *next;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return false;

	new_hash = head_hashfn(ht, new_tbl, p);
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same new-table bucket
	 * as the previous node. Call that previous node p.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);

		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	/* Find the subsequent node which does hash to the same
	 * bucket as node p, or NULL if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	rcu_assign_pointer(p->next, next);

	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);

	return !rht_is_a_nulls(p);
}
static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht: the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table has been
	 * picked up, so no new additions go into the old table while we
	 * relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * the newly formed bucket chain (containing entries added to the
	 * future table) to that entry. Since all the entries which will end
	 * up in the new bucket appear in the same old bucket, this constructs
	 * an entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		lock_buckets(new_tbl, old_tbl, new_hash);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(ht, new_tbl, new_hash, he);
				break;
			}
		}
		unlock_buckets(new_tbl, old_tbl, new_hash);
	}

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			lock_buckets(new_tbl, old_tbl, old_hash);

			if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
						  old_hash))
				complete = false;

			unlock_buckets(new_tbl, old_tbl, old_hash);
		}
	}

	rcu_assign_pointer(ht->tbl, new_tbl);
	synchronize_rcu();

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		lock_buckets(new_tbl, tbl, new_hash);

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		unlock_buckets(new_tbl, tbl, new_hash);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	struct rhashtable_walker *walker;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	list_for_each_entry(walker, &ht->walkers, list)
		walker->resize = true;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

unlock:
	mutex_unlock(&ht->mutex);
}

static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	size_t size = tbl->size;

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl &&
	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
		schedule_work(&ht->run_work);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head;

	hash = rht_bucket_index(tbl, hash);
	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	ASSERT_BUCKET_LOCK(ht, tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_wakeup_worker(ht);
}
/**
 * rhashtable_insert - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *old_tbl;
	unsigned hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(tbl, old_tbl, hash);
	__rhashtable_insert(ht, obj, tbl, hash);
	unlock_buckets(tbl, old_tbl, hash);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
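
/* Usage sketch (editor's addition, not part of the original file): inserting an
 * object into a table configured with a fixed-length key, as in the
 * rhashtable_init() example further down. struct test_obj and the global table
 * my_objects are illustrative assumptions, not part of this file.
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static struct rhashtable my_objects;
 *
 *	static int add_obj(int key)
 *	{
 *		struct test_obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
 *
 *		if (!obj)
 *			return -ENOMEM;
 *		obj->key = key;
 *		rhashtable_insert(&my_objects, &obj->node);
 *		return 0;
 *	}
 */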
/**
 * rhashtable_remove - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *new_tbl, *old_tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he, *he2;
	unsigned int hash, new_hash;
	bool ret = false;

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);
restart:
	hash = rht_bucket_index(tbl, new_hash);
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		ASSERT_BUCKET_LOCK(ht, tbl, hash);

		if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
		    !rht_is_a_nulls(obj->next) &&
		    head_hashfn(ht, tbl, obj->next) != hash) {
			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
			rht_for_each_continue(he2, obj->next, tbl, hash) {
				if (head_hashfn(ht, tbl, he2) == hash) {
					rcu_assign_pointer(*pprev, he2);
					goto found;
				}
			}

			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else {
			rcu_assign_pointer(*pprev, obj->next);
		}

found:
		ret = true;
		break;
	}

	/* The entry may be linked in either 'tbl', 'future_tbl', or both.
	 * 'future_tbl' only exists for a short period of time during
	 * resizing. Thus traversing both is fine and the added cost is
	 * only incurred during that window.
	 */
	if (tbl != old_tbl) {
		tbl = old_tbl;
		goto restart;
	}

	unlock_buckets(new_tbl, old_tbl, new_hash);

	if (ret) {
		atomic_dec(&ht->nelems);
		rhashtable_wakeup_worker(ht);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}
/**
 * rhashtable_lookup - lookup key in hash table
 * @ht: hash table
 * @key: pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht: hash table
 * @key: the pointer to the key
 * @compare: compare function, must return true on match
 * @arg: argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
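
/* Usage sketch (editor's addition, not part of the original file): looking up
 * the object inserted in the earlier example. rhashtable_lookup() drops the
 * RCU read lock before returning, so callers that dereference the result
 * typically hold rcu_read_lock() around both the lookup and the use. The
 * names my_objects and struct test_obj are illustrative assumptions.
 *
 *	struct test_obj *obj;
 *	int key = 42;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_objects, &key);
 *	if (obj)
 *		pr_info("found entry with key %d\n", obj->key);
 *	rcu_read_unlock();
 */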
/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
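
/* Usage sketch (editor's addition, not part of the original file): inserting
 * only when the key is not already present, using the boolean return value to
 * detect duplicates. It reuses the illustrative struct test_obj and my_objects
 * names from the earlier examples.
 *
 *	obj->key = key;
 *	if (!rhashtable_lookup_insert(&my_objects, &obj->node)) {
 *		kfree(obj);		// an entry with this key already exists
 *		return -EEXIST;
 *	}
 *	return 0;
 */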
/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @compare: compare function, must return true on match
 * @arg: argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	struct bucket_table *new_tbl, *old_tbl;
	u32 new_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);

	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	unlock_buckets(new_tbl, old_tbl, new_hash);

	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht: Table to walk over
 * @iter: Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	list_add(&iter->walker->list, &ht->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);
/**
 * rhashtable_walk_exit - Free an iterator
 * @iter: Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter: Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
	rcu_read_lock();

	if (iter->walker->resize) {
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter: Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	const struct bucket_table *tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	tbl = rht_dereference_rcu(ht->tbl, ht);

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

out:
	if (iter->walker->resize) {
		iter->p = NULL;
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return ERR_PTR(-EAGAIN);
	}

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter: Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
	rcu_read_unlock();
	iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
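
/* Usage sketch (editor's addition, not part of the original file): a typical
 * loop over the walker API above. A resize reported as -EAGAIN simply rewinds
 * the iterator, so the loop can continue; entries may then be seen twice.
 * struct test_obj and my_objects are the illustrative names used in the other
 * examples.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&my_objects, &iter);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_walk_start(&iter);
 *	if (err && err != -EAGAIN)
 *		goto out;
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("walked entry with key %d\n", obj->key);
 *	}
 *
 * out:
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */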
static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));
	INIT_LIST_HEAD(&ht->walkers);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
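
/* Usage sketch (editor's addition, not part of the original file): initialising
 * the table used by the earlier examples with automatic resizing enabled. The
 * values mirror Configuration Example 1 above; rht_grow_above_75() and
 * rht_shrink_below_30() are the watermark helpers exported by this file.
 *
 *	static struct rhashtable_params my_params = {
 *		.head_offset	 = offsetof(struct test_obj, node),
 *		.key_offset	 = offsetof(struct test_obj, key),
 *		.key_len	 = sizeof(int),
 *		.hashfn		 = jhash,
 *		.nulls_base	 = (1U << RHT_BASE_SHIFT),
 *		.grow_decision	 = rht_grow_above_75,
 *		.shrink_decision = rht_shrink_below_30,
 *	};
 *
 *	int err = rhashtable_init(&my_objects, &my_params);
 *	if (err)
 *		return err;
 */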
/**
 * rhashtable_destroy - destroy hash table
 * @ht: the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	if (ht->p.grow_decision || ht->p.shrink_decision)
		cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);