/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rculist.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                        Hash                         |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)
/* Maximum chain length before rehash
 *
 * The maximum (not average) chain length grows with the size of the hash
 * table, at a rate of (log N)/(log log N).
 *
 * The value of 16 is selected so that even if the hash table grew to
 * 2^32 you would not expect the maximum chain length to exceed it
 * unless we are under attack (or extremely unlucky).
 *
 * As this limit is only to detect attacks, we don't need to set it to a
 * lower value as you'd need the chain length to vastly exceed 16 to have
 * any real effect on the system.
 */
#define RHT_ELASTICITY	16u

struct rhash_head {
	struct rhash_head __rcu		*next;
};

struct rhlist_head {
	struct rhash_head		rhead;
	struct rhlist_head __rcu	*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @nest: Number of bits of first-level nested table.
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		nest;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @nulls_base: Base value to generate nulls marker
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	u16			nelem_hint;
	u16			key_len;
	u16			key_offset;
	u16			head_offset;
	unsigned int		max_size;
	u16			min_size;
	bool			automatic_shrinking;
	u8			locks_mul;
	u32			nulls_base;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};
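
/* Example: a minimal sketch of embedding an rhashtable in an object type.
 * struct test_obj, test_params and test_ht are illustrative names, not
 * part of this API:
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *		.automatic_shrinking = true,
 *	};
 *
 *	static struct rhashtable test_ht;
 *
 *	int err = rhashtable_init(&test_ht, &test_params);
 *
 * With key_len set and no explicit hashfn, keys are hashed with jhash2()
 * when the length is a multiple of four bytes, otherwise with jhash().
 */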
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @p: Configuration parameters
 * @max_elems: Maximum number of elements in table
 * @rhlist: True if this is an rhltable
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	unsigned int			key_len;
	struct rhashtable_params	p;
	unsigned int			max_elems;
	bool				rhlist;
	struct work_struct		run_work;
	struct mutex			mutex;
	spinlock_t			lock;
};
/**
 * struct rhltable - Hash table with duplicate objects in a list
 * @ht: Underlying rhashtable
 */
struct rhltable {
	struct rhashtable ht;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator
 * @ht: Table to iterate through
 * @p: Current pointer
 * @list: Current hash list pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 * @end_of_table: true if the iterator has reached the end of the table
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhlist_head *list;
	struct rhashtable_walker walker;
	unsigned int slot;
	unsigned int skip;
	bool end_of_table;
};
static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, tbl->hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32),
				      tbl->hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else
			hash = jhash(key, key_len, tbl->hash_rnd);
	}

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}
/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) >= ht->max_elems;
}
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as locking the bucket in both tables
 * during resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
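
/* A sketch of the ordering rule above as the resize path would follow it
 * (old_tbl, new_tbl and the two hash variables are illustrative; the real
 * rehash code lives in lib/rhashtable.c):
 *
 *	spin_lock_bh(rht_bucket_lock(old_tbl, old_hash));
 *	spin_lock_nested(rht_bucket_lock(new_tbl, new_hash),
 *			 SINGLE_DEPTH_NESTING);
 *	... relink entries from the old bucket into the new one ...
 *	spin_unlock(rht_bucket_lock(new_tbl, new_hash));
 *	spin_unlock_bh(rht_bucket_lock(old_tbl, old_hash));
 */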
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);
int rhltable_init(struct rhltable *hlt,
		  const struct rhashtable_params *params);

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);

static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
{
	(void)rhashtable_walk_start_check(iter);
}

void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
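
/* Example: a minimal sketch of a full table walk, reusing the illustrative
 * test_ht and struct test_obj names from above. rhashtable_walk_next()
 * returns ERR_PTR(-EAGAIN) when a concurrent resize forces part of the
 * table to be walked again, so objects may be seen twice:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&test_ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		... use obj; the walk itself is under RCU protection ...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */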
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash);
struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

static inline struct rhash_head __rcu *const *rht_bucket(
	const struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_var(
	struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
				     &tbl->buckets[hash];
}
/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \
				    tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @next: the &struct rhash_head to use as next in loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
	for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
	     next = !rht_is_a_nulls(pos) ? \
		    rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = next, \
	     next = !rht_is_a_nulls(pos) ? \
		    rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
					tbl, hash, member)

/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos: the &struct rhlist_head to use as a loop cursor.
 * @list: the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list) \
	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))

/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhlist_head to use as a loop cursor.
 * @list: the head of the list
 * @member: name of the &struct rhlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
	for (pos = list; pos && rht_entry(tpos, pos, member); \
	     pos = rcu_dereference_raw(pos->next))
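
/* Example: a minimal sketch of walking all duplicates for one key in an
 * rhltable. test_hlt and struct test_obj are illustrative names; here the
 * object is assumed to embed a struct rhlist_head named "list_node"
 * rather than a struct rhash_head:
 *
 *	struct rhlist_head *list, *pos;
 *	struct test_obj *obj;
 *	int key = 42;
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&test_hlt, &key, test_params);
 *	rhl_for_each_entry_rcu(obj, pos, list, list_node) {
 *		... use obj ...
 *	}
 *	rcu_read_unlock();
 */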
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/* Internal function, do not use. */
static inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		return he;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}
/**
 * rhashtable_lookup - search hash table
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params);

	return he ? rht_obj(ht, he) : NULL;
}

/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}
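
/* Example: a minimal lookup sketch, reusing the illustrative test_ht,
 * test_params and struct test_obj names from above:
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&test_ht, &key, test_params);
 *	if (obj)
 *		... use obj while still inside the RCU read-side section ...
 *	rcu_read_unlock();
 *
 * rhashtable_lookup_fast() wraps the same lookup in its own RCU read-side
 * critical section, so the object must be pinned by some other mechanism
 * before it is used outside of it.
 */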
/**
 * rhltable_lookup - search hash list table
 * @hlt: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}
/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in the hash table if there
 * is a clash, otherwise it returns an error via ERR_PTR().
 */
static inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);
	spin_lock_bh(lock);

	if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
slow_path:
		spin_unlock_bh(lock);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_insert(ht, tbl, hash);
	data = ERR_PTR(-ENOMEM);
	if (!pprev)
		goto out;

	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head))))
			continue;

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		goto good;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

good:
	data = NULL;

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return data;
}
/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond 75%.
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
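
/* Example: a minimal insertion sketch with the illustrative test_ht,
 * test_params and struct test_obj from above. A non-zero return (for
 * example -E2BIG or -ENOMEM) means the object was not inserted:
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	err = rhashtable_insert_fast(&test_ht, &obj->node, test_params);
 *	if (err)
 *		kfree(obj);
 */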
/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt: hash list table
 * @key: the pointer to the key
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond 75%.
 */
static inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}

/**
 * rhltable_insert - insert object into hash list table
 * @hlt: hash list table
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond 75%.
 */
static inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}
/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond 75%.
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
/**
 * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Just like rhashtable_lookup_insert_fast(), but this function returns the
 * object if it exists, NULL if it did not exist and the insertion was
 * successful, and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
					false);
}
/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond 75%.
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it does not exist and the insertion was
 * successful, and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}
/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(he, *pprev, tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		rcu_assign_pointer(*pprev, obj);
		break;
	}

	spin_unlock_bh(lock);

	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry is still in the
	 * old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}
/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if automatic shrinking is enabled
 * and the table load drops below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}
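
/* Example: a minimal removal sketch with the same illustrative names.
 * Concurrent RCU readers may still hold a reference, so the object must
 * only be freed after a grace period, e.g. with kfree_rcu() on a
 * struct rcu_head assumed to be embedded in struct test_obj:
 *
 *	err = rhashtable_remove_fast(&test_ht, &obj->node, test_params);
 *	if (!err)
 *		kfree_rcu(obj, rcu);
 */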
/**
 * rhltable_remove - remove object from hash list table
 * @hlt: hash list table
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if automatic shrinking is enabled
 * and the table load drops below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}
/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(he, *pprev, tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		rcu_assign_pointer(*pprev, obj_new);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht: hash table
 * @obj_old: pointer to hash head inside object being replaced
 * @obj_new: pointer to hash head inside object which is new
 * @params: hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table here.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry is still in the
	 * old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}
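
/* Example: a minimal replacement sketch. The new object must hash to the
 * same bucket as the old one, which with the illustrative test_params
 * above means it must carry the same key:
 *
 *	new_obj->key = obj->key;
 *	err = rhashtable_replace_fast(&test_ht, &obj->node,
 *				      &new_obj->node, test_params);
 *	if (!err)
 *		kfree_rcu(obj, rcu);
 */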
/* Obsolete function, do not use in new code. */
static inline int rhashtable_walk_init(struct rhashtable *ht,
				       struct rhashtable_iter *iter, gfp_t gfp)
{
	rhashtable_walk_enter(ht, iter);
	return 0;
}

/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt: Table to walk over
 * @iter: Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	return rhashtable_walk_enter(&hlt->ht, iter);
}

/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt: the hash list table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}

static inline void rhltable_destroy(struct rhltable *hlt)
{
	return rhltable_free_and_destroy(hlt, NULL, NULL);
}

#endif /* _LINUX_RHASHTABLE_H */