/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	free_bucket_spinlocks(tbl->locks);
	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      unsigned int shifted,
					      unsigned int nhash)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && shifted) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
					    (i << shifted) | nhash);
	}

	rcu_assign_pointer(*prev, ntbl);

	return ntbl;
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				0, 0)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size, max_locks;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	else
		tbl = kvzalloc(size, gfp);

	size = nbuckets;

	if (tbl == NULL && gfp != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}
	if (tbl == NULL)
		return NULL;

	tbl->size = size;

	max_locks = size >> 1;
	if (tbl->nest)
		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);

	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
				   ht->p.locks_mul, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
		;

	if (err == -ENOENT) {
		old_tbl->rehash++;
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand again right away.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err)
		err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht: Table to walk over
 * @iter: Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter: Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter: Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'
		 */
		struct rhash_head *p;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}

found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter: Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter: Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter: Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter: Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
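
/* A minimal usage sketch of the walk API above; struct example_obj and
 * example_walk() are illustrative assumptions, not part of this table's
 * interface.  It shows the enter/start/next/stop/exit sequence and how a
 * resize (-EAGAIN from rhashtable_walk_next) rewinds the iterator.
 *
 *	static void example_walk(struct rhashtable *ht)
 *	{
 *		struct rhashtable_iter iter;
 *		struct example_obj *obj;
 *
 *		rhashtable_walk_enter(ht, &iter);
 *		rhashtable_walk_start(&iter);
 *
 *		while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *			if (IS_ERR(obj)) {
 *				if (PTR_ERR(obj) == -EAGAIN)
 *					continue;	(resize: iterator rewound)
 *				break;
 *			}
 *			(use obj; the walk is RCU protected, so do not sleep here)
 *		}
 *
 *		rhashtable_walk_stop(&iter);
 *		rhashtable_walk_exit(&iter);
 *	}
 */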

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
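
/* A minimal end-to-end sketch built on "Configuration Example 1" above; the
 * object lifetime, error handling and call site are illustrative assumptions.
 * The insert/lookup/remove helpers are the inline fast-path wrappers from
 * <linux/rhashtable.h>.
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj, *found;
 *	int err;
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 *
 *	obj->key = 42;
 *	err = rhashtable_insert_fast(&ht, &obj->node, params);
 *
 *	rcu_read_lock();
 *	found = rhashtable_lookup_fast(&ht, &obj->key, params);
 *	(use found while the RCU read lock is held)
 *	rcu_read_unlock();
 *
 *	rhashtable_remove_fast(&ht, &obj->node, params);
 *	rhashtable_destroy(&ht);
 */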

/**
 * rhltable_init - initialize a new hash list table
 * @hlt: hash list table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;

	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht: the hash table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * Stops any pending async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
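
/* Teardown sketch for the destroy path above; free_obj() and its kfree()
 * based release are illustrative assumptions.  Direct kfree() presumes no
 * RCU readers can still reach the elements; otherwise the callback should
 * defer the release (e.g. via kfree_rcu) in keeping with the note above.
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */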

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull =
		(struct rhash_head __rcu *)NULLS_MARKER(0);
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;
	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return &rhnull;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;
	unsigned int shifted;
	unsigned int nhash;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	nhash = index;
	shifted = tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift) ? shifted : 0, nhash);

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		nhash |= index << shifted;
		shifted += shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift) ? shifted : 0,
					  nhash);
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);