// SPDX-License-Identifier: GPL-2.0+
/*
 * XArray implementation
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/*
 * Coding conventions in this file:
 *
 * @xa is used to refer to the entire xarray.
 * @xas is the 'xarray operation state'. It may be either a pointer to
 * an xa_state, or an xa_state stored on the stack. This is an unfortunate
 * ambiguity.
 * @index is the index of the entry being operated on
 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
 * @node refers to an xa_node; usually the primary one being operated on by
 * this function.
 * @offset is the index into the slots array inside an xa_node.
 * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
	return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_lock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_lock_bh(xas);
	else
		xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_unlock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_unlock_bh(xas);
	else
		xas_unlock(xas);
}

static inline bool xa_track_free(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
		unsigned int offset, xa_mark_t mark)
{
	return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
	bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set. Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
	return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}

static void xas_advance(struct xa_state *xas)
{
	xas->xa_offset++;
	xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}

/*
 * Starts a walk. If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to. If we're in an
 * error state, return NULL. If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node. Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}

static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	if (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
	}

	xas->xa_offset = offset;
	return entry;
}

/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index. However, it will do nothing and return %NULL if
 * @xas is in an error state. xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context. The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
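
/*
 * Illustrative example (editorial, not part of the original file): a plain
 * lookup using the advanced API. The array @my_xa and index @my_index are
 * hypothetical names; a minimal sketch assuming the caller holds no lock.
 *
 *	XA_STATE(xas, &my_xa, my_index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = xas_load(&xas);
 *	rcu_read_unlock();
 */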

/* move the radix tree node cache here */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

#define XA_RCU_FREE	((struct xarray *)1)

static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * This function is now internal-only.
 */
static void xas_destroy(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_alloc;

	if (!node)
		return;
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	kmem_cache_free(radix_tree_node_cachep, node);
	xas->xa_alloc = NULL;
}

/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue. The caller
 * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc(). More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
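
/*
 * Illustrative example (editorial): the canonical xas_nomem() retry loop
 * described above, as also used by xa_store() later in this file.
 * @my_xa, @my_index and @item are hypothetical.
 *
 *	XA_STATE(xas, &my_xa, my_index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, item);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 */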

/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (gfpflags_allow_blocking(gfp)) {
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
	if (xas->xa_update)
		xas->xa_update(node);
	else
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		xas->xa_alloc = NULL;
	} else {
		node = kmem_cache_alloc(radix_tree_node_cachep,
					GFP_NOWAIT | __GFP_NOWARN);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas. Because we cannot store a
 * multiple-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask;
		mask = (((xas->xa_sibs + 1UL) << xas->xa_shift) - 1);
		max |= mask;
		if (mask == max)
			max++;
	}
#endif

	return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
	if (!xa_is_node(entry))
		return 0;
	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		if (!xa_is_node(entry) && node->shift)
			break;
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);

		node->count = 0;
		node->nr_values = 0;
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}

/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node. This will fail if @xas->xa_node has
 * a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	if (!node->parent)
		xas_shrink(xas);
}

/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree. We must now free it and all
 * of its subnodes. There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (xa_is_node(entry)) {
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
				node_mark_all(node, XA_FREE_MARK);
				if (!xa_marked(xa, XA_FREE_MARK)) {
					node_clear_mark(node, 0, XA_FREE_MARK);
					xa_mark_set(xa, XA_FREE_MARK);
				}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
			}
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store(). It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns NULL. If it failed to create the
 * slot, returns NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}

/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas. Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range. This is for the benefit of users which have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
	unsigned long index = xas->xa_index;
	unsigned char shift = xas->xa_shift;
	unsigned char sibs = xas->xa_sibs;

	xas->xa_index |= ((sibs + 1UL) << shift) - 1;
	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
		xas->xa_offset |= sibs;
	xas->xa_shift = 0;
	xas->xa_sibs = 0;

	for (;;) {
		xas_create(xas);
		if (xas_error(xas))
			goto restore;
		if (xas->xa_index <= (index | XA_CHUNK_MASK))
			goto success;
		xas->xa_index -= XA_CHUNK_SIZE;

		for (;;) {
			struct xa_node *node = xas->xa_node;
			xas->xa_node = xa_parent_locked(xas->xa, node);
			xas->xa_offset = node->offset - 1;
			if (node->offset != 0)
				break;
		}
	}

restore:
	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
	xas->xa_index = index;
	return;
success:
	xas->xa_index = index;
	if (xas->xa_node)
		xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);

static void update_node(struct xa_state *xas, struct xa_node *node,
		int count, int values)
{
	if (!node || (!count && !values))
		return;

	node->count += count;
	node->nr_values += values;
	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
	xas_update(xas, node);
	if (count < 0)
		xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range). This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;
	int values = 0;
	void *first, *next;
	bool value = xa_is_value(entry);

	if (entry)
		first = xas_create(xas);
	else
		first = xas_load(xas);

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early. rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);
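
/*
 * Illustrative example (editorial): erasing an entry with the advanced
 * API. Storing %NULL never allocates memory, so no xas_nomem() loop is
 * needed. @my_xa and @my_index are hypothetical.
 *
 *	XA_STATE(xas, &my_xa, my_index);
 *	void *old;
 *
 *	xas_lock(&xas);
 *	old = xas_store(&xas, NULL);
 *	xas_unlock(&xas);
 */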

/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Return: true if the mark is set, false if the mark is clear or @xas
 * is in an error state.
 */
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
	if (xas_invalid(xas))
		return false;
	if (!xas->xa_node)
		return xa_marked(xas->xa, mark);
	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);

/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries. Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (node_set_mark(node, offset, mark))
			return;
		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (!xa_marked(xas->xa, mark))
		xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);

/**
 * xas_clear_mark() - Clears the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Clears the specified mark on this entry, and walks back to the head
 * attempting to clear it on all the ancestor entries. Does nothing if
 * @xas has not been walked to an entry, or is in an error state.
 */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (!node_clear_mark(node, offset, mark))
			return;
		if (node_any_mark(node, mark))
			return;

		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (xa_marked(xas->xa, mark))
		xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);

/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas. If we're tracking
 * free entries with a mark, we need to set it on all entries. All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
	xa_mark_t mark = 0;

	for (;;) {
		if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
			xas_set_mark(xas, mark);
		else
			xas_clear_mark(xas, mark);
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}
EXPORT_SYMBOL_GPL(xas_init_marks);

/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry. Those users should call this function before they drop
 * the lock. It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock. If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration. If a user needs
 * to pause a reverse iteration, we will need an xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (xas_invalid(xas))
		return;

	if (node) {
		unsigned int offset = xas->xa_offset;
		while (++offset < XA_CHUNK_SIZE) {
			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
				break;
		}
		xas->xa_index += (offset - xas->xa_offset) << node->shift;
	} else {
		xas->xa_index++;
	}
	xas->xa_node = XAS_RESTART;
}
EXPORT_SYMBOL_GPL(xas_pause);
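
/*
 * Illustrative example (editorial): dropping the lock periodically during
 * a long walk, as described above. @my_xa is hypothetical and process()
 * stands in for the caller's work.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	xas_lock(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		process(entry);
 *		if (need_resched()) {
 *			xas_pause(&xas);
 *			xas_unlock(&xas);
 *			cond_resched();
 *			xas_lock(&xas);
 *		}
 *	}
 *	xas_unlock(&xas);
 */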

/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index--;
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset--;

	while (xas->xa_offset == 255) {
		xas->xa_offset = xas->xa_node->offset - 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_prev);

/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index++;
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset++;

	while (xas->xa_offset == XA_CHUNK_SIZE) {
		xas->xa_offset = xas->xa_node->offset + 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_next);

/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index. If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array. This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		return set_bounds(xas);
	} else if (xas_top(xas->xa_node)) {
		entry = xas_load(xas);
		if (entry || xas_not_node(xas->xa_node))
			return entry;
	} else if (!xas->xa_node->shift &&
		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
	}

	xas_advance(xas);

	while (xas->xa_node && (xas->xa_index <= max)) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (xa_is_node(entry)) {
			xas->xa_node = xa_to_node(entry);
			xas->xa_offset = 0;
			continue;
		}
		if (entry && !xa_is_sibling(entry))
			return entry;

		xas_advance(xas);
	}

	if (!xas->xa_node)
		xas->xa_node = XAS_BOUNDS;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
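
/*
 * Illustrative example (editorial): exploiting the bounds behaviour
 * described above to append after the last present entry. When
 * xas_find() returns %NULL, @xas already points at the first free index,
 * so it can be handed straight to xas_store(). @my_xa and @item are
 * hypothetical; the xas_nomem() retry is omitted for brevity.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *
 *	xas_lock(&xas);
 *	while (xas_find(&xas, ULONG_MAX))
 *		;
 *	xas_store(&xas, item);
 *	xas_unlock(&xas);
 */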

/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index. If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array. This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
	bool advance = true;
	unsigned int offset;
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		goto out;
	} else if (xas_top(xas->xa_node)) {
		advance = false;
		entry = xa_head(xas->xa);
		xas->xa_node = NULL;
		if (xas->xa_index > max_index(entry))
			goto bounds;
		if (!xa_is_node(entry)) {
			if (xa_marked(xas->xa, mark))
				return entry;
			xas->xa_index = 1;
			goto out;
		}
		xas->xa_node = xa_to_node(entry);
		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
	}

	while (xas->xa_index <= max) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			advance = false;
			continue;
		}

		if (!advance) {
			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
			if (xa_is_sibling(entry)) {
				xas->xa_offset = xa_to_sibling(entry);
				xas_move_index(xas, xas->xa_offset);
			}
		}

		offset = xas_find_chunk(xas, advance, mark);
		if (offset > xas->xa_offset) {
			advance = false;
			xas_move_index(xas, offset);
			/* Mind the wrap */
			if ((xas->xa_index - 1) >= max)
				goto max;
			xas->xa_offset = offset;
			if (offset == XA_CHUNK_SIZE)
				continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}

out:
	if (!max)
		goto max;
bounds:
	xas->xa_node = XAS_BOUNDS;
	return NULL;
max:
	xas->xa_node = XAS_RESTART;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);
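
/*
 * Illustrative example (editorial): visiting only the entries carrying
 * %XA_MARK_0. xas_find_marked() is the engine behind the
 * xas_for_each_marked() iterator used here. @my_xa is hypothetical and
 * process() stands in for the caller's work.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
 *		process(entry);
 *	rcu_read_unlock();
 */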

/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context. Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
	void *curr;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node)
		return NULL;

	if (xas_top(xas->xa_node)) {
		curr = xas_start(xas);
		if (!curr)
			return NULL;
		while (xa_is_node(curr)) {
			struct xa_node *node = xa_to_node(curr);
			curr = xas_descend(xas, node);
		}
		if (curr)
			return curr;
	}

	if (xas->xa_node->shift > xas->xa_shift)
		return NULL;

	for (;;) {
		if (xas->xa_node->shift == xas->xa_shift) {
			if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
				break;
		} else if (xas->xa_offset == XA_CHUNK_MASK) {
			xas->xa_offset = xas->xa_node->offset;
			xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			continue;
		}
		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
		if (xa_is_sibling(curr))
			continue;
		while (xa_is_node(curr)) {
			xas->xa_node = xa_to_node(curr);
			xas->xa_offset = 0;
			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
		}
		if (curr)
			return curr;
	}
	xas->xa_offset -= xas->xa_sibs;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);

/**
 * xa_init_flags() - Initialise an empty XArray with flags.
 * @xa: XArray.
 * @flags: XA_FLAG values.
 *
 * If you need to initialise an XArray with special flags (eg you need
 * to take the lock from interrupt context), use this function instead
 * of xa_init().
 *
 * Context: Any context.
 */
void xa_init_flags(struct xarray *xa, gfp_t flags)
{
	unsigned int lock_type;
	static struct lock_class_key xa_lock_irq;
	static struct lock_class_key xa_lock_bh;

	spin_lock_init(&xa->xa_lock);
	xa->xa_flags = flags;
	xa->xa_head = NULL;

	lock_type = xa_lock_type(xa);
	if (lock_type == XA_LOCK_IRQ)
		lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
	else if (lock_type == XA_LOCK_BH)
		lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
}
EXPORT_SYMBOL(xa_init_flags);
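
/*
 * Illustrative example (editorial): an XArray whose lock is taken from
 * interrupt context, initialised at run time, and another declared at
 * build time with the equivalent DEFINE_XARRAY_FLAGS() macro from
 * xarray.h. The names are hypothetical.
 *
 *	struct xarray irq_array;
 *
 *	xa_init_flags(&irq_array, XA_FLAGS_LOCK_IRQ);
 *
 *	static DEFINE_XARRAY_FLAGS(bh_array, XA_FLAGS_LOCK_BH);
 */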

/**
 * xa_load() - Load an entry from an XArray.
 * @xa: XArray.
 * @index: index into array.
 *
 * Context: Any context. Takes and releases the RCU lock.
 * Return: The entry at @index in @xa.
 */
void *xa_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))
			entry = NULL;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}
EXPORT_SYMBOL(xa_load);

static void *xas_result(struct xa_state *xas, void *curr)
{
	if (xa_is_zero(curr))
		return NULL;
	XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
	if (xas_error(xas))
		curr = xas->xa_node;
	return curr;
}

/**
 * __xa_erase() - Erase this entry from the XArray while locked.
 * @xa: XArray.
 * @index: Index into array.
 *
 * If the entry at this index is a multi-index entry then all indices will
 * be erased, and the entry will no longer be a multi-index entry.
 *
 * Context: Any context. Expects xa_lock to be held on entry.
 * Return: The old entry at this index.
 */
void *__xa_erase(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL_GPL(__xa_erase);

/**
 * xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Process context. Takes and releases the xa_lock. May sleep
 * if the @gfp flags permit.
 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
 * failed.
 */
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		if (xa_track_free(xa) && entry)
			xas_clear_mark(&xas, XA_FREE_MARK);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(xa_store);
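
/*
 * Illustrative example (editorial): storing a pointer and unpacking a
 * possible error with xa_err(). @my_xa, @my_index and @item are
 * hypothetical.
 *
 *	void *old = xa_store(&my_xa, my_index, item, GFP_KERNEL);
 *
 *	if (xa_is_err(old))
 *		return xa_err(old);
 */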

/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_store(&xas, entry);
		if (xa_track_free(xa) && entry)
			xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);

/**
 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * If the entry at @index is the same as @old, replace it with @entry.
 * If the return value is equal to @old, then the exchange was successful.
 *
 * Context: Process context. Takes and releases the xa_lock. May sleep
 * if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
void *xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		curr = xas_load(&xas);
		if (curr == XA_ZERO_ENTRY)
			curr = NULL;
		if (curr == old) {
			xas_store(&xas, entry);
			if (xa_track_free(xa) && entry)
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(xa_cmpxchg);
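
/*
 * Illustrative example (editorial): insert-only-if-absent. The exchange
 * succeeded iff the returned value equals @old, which is %NULL here.
 * @my_xa, @my_index and @item are hypothetical.
 *
 *	void *curr = xa_cmpxchg(&my_xa, my_index, NULL, item, GFP_KERNEL);
 *
 *	if (xa_is_err(curr))
 *		return xa_err(curr);
 *	if (curr)
 *		return -EBUSY;	(someone else's entry was already there)
 */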

/**
 * __xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_load(&xas);
		if (curr == XA_ZERO_ENTRY)
			curr = NULL;
		if (curr == old) {
			xas_store(&xas, entry);
			if (xa_track_free(xa) && entry)
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);

/**
 * xa_reserve() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * Ensures there is somewhere to store an entry at @index in the array.
 * If there is already something stored at @index, this function does
 * nothing. If there was nothing there, the entry is marked as reserved.
 * Loads from @index will continue to see a %NULL pointer until a
 * subsequent store to @index.
 *
 * If you do not use the entry that you have reserved, call xa_release()
 * or xa_erase() to free any unnecessary memory.
 *
 * Context: Process context. Takes and releases the xa_lock, IRQ or BH safe
 * if specified in XArray flags. May sleep if the @gfp flags permit.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	unsigned int lock_type = xa_lock_type(xa);
	void *curr;

	do {
		xas_lock_type(&xas, lock_type);
		curr = xas_load(&xas);
		if (!curr)
			xas_store(&xas, XA_ZERO_ENTRY);
		xas_unlock_type(&xas, lock_type);
	} while (xas_nomem(&xas, gfp));

	return xas_error(&xas);
}
EXPORT_SYMBOL(xa_reserve);
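
/*
 * Illustrative example (editorial): reserving an index in advance so that
 * the slot already exists when a later store is made from a context which
 * cannot sleep. @my_xa, @my_index and @item are hypothetical; an unused
 * reservation would be returned with xa_release(&my_xa, my_index).
 *
 *	if (xa_reserve(&my_xa, my_index, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	xa_store(&my_xa, my_index, item, GFP_NOWAIT);
 */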

/**
 * __xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @max: Maximum ID to allocate (inclusive).
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @id and @max.
 * Updates the @id pointer with the index, then stores the entry at that
 * index. A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
 * there is no more space in the XArray.
 */
int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);
	int err;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return -EINVAL;
	if (WARN_ON_ONCE(!xa_track_free(xa)))
		return -EINVAL;

	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		xas.xa_index = *id;
		xas_find_marked(&xas, max, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -ENOSPC);
		xas_store(&xas, entry);
		xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	err = xas_error(&xas);
	if (!err)
		*id = xas.xa_index;
	return err;
}
EXPORT_SYMBOL(__xa_alloc);
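
/*
 * Illustrative example (editorial): allocating an ID under the caller's
 * lock. The array must track free entries, e.g. by being initialised
 * with XA_FLAGS_ALLOC from xarray.h. @my_xa and @item are hypothetical.
 *
 *	u32 id = 0;
 *	int err;
 *
 *	xa_lock(&my_xa);
 *	err = __xa_alloc(&my_xa, &id, UINT_MAX, item, GFP_KERNEL);
 *	xa_unlock(&my_xa);
 */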
  1390. /**
  1391. * __xa_set_mark() - Set this mark on this entry while locked.
  1392. * @xa: XArray.
  1393. * @index: Index of entry.
  1394. * @mark: Mark number.
  1395. *
  1396. * Attempting to set a mark on a NULL entry does not succeed.
  1397. *
  1398. * Context: Any context. Expects xa_lock to be held on entry.
  1399. */
  1400. void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
  1401. {
  1402. XA_STATE(xas, xa, index);
  1403. void *entry = xas_load(&xas);
  1404. if (entry)
  1405. xas_set_mark(&xas, mark);
  1406. }
  1407. EXPORT_SYMBOL_GPL(__xa_set_mark);
  1408. /**
  1409. * __xa_clear_mark() - Clear this mark on this entry while locked.
  1410. * @xa: XArray.
  1411. * @index: Index of entry.
  1412. * @mark: Mark number.
  1413. *
  1414. * Context: Any context. Expects xa_lock to be held on entry.
  1415. */
  1416. void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
  1417. {
  1418. XA_STATE(xas, xa, index);
  1419. void *entry = xas_load(&xas);
  1420. if (entry)
  1421. xas_clear_mark(&xas, mark);
  1422. }
  1423. EXPORT_SYMBOL_GPL(__xa_clear_mark);

/**
 * xa_get_mark() - Inquire whether this mark is set on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * This function uses the RCU read lock, so the result may be out of date
 * by the time it returns.  If you need the result to be stable, use a lock.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: True if the entry at @index has this mark set, false if it doesn't.
 */
bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	entry = xas_start(&xas);
	while (xas_get_mark(&xas, mark)) {
		if (!xa_is_node(entry))
			goto found;
		entry = xas_descend(&xas, xa_to_node(entry));
	}
	rcu_read_unlock();
	return false;
 found:
	rcu_read_unlock();
	return true;
}
EXPORT_SYMBOL(xa_get_mark);

/**
 * xa_set_mark() - Set this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_set_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_set_mark);

/**
 * xa_clear_mark() - Clear this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Clearing a mark always succeeds.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_clear_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_clear_mark);
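
/*
 * Illustrative sketch, not part of the original source: using one of the
 * three mark bits to tag entries, here treating XA_MARK_0 as a
 * hypothetical "dirty" flag on an entry of a hypothetical 'array':
 *
 *	xa_set_mark(&array, index, XA_MARK_0);
 *	if (xa_get_mark(&array, index, XA_MARK_0))
 *		pr_debug("entry %lu is dirty\n", index);
 *	xa_clear_mark(&array, index, XA_MARK_0);
 *
 * Setting a mark silently does nothing if no entry is stored at @index,
 * while clearing one always succeeds.
 */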

/**
 * xa_find() - Search the XArray for an entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter, and has the lowest
 * index that is at least @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may not find
 * entries which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find(struct xarray *xa, unsigned long *indexp,
		unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find);
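
/*
 * Illustrative sketch, not part of the original source: finding the first
 * present entry at or above 'start' in a hypothetical 'array':
 *
 *	unsigned long index = start;
 *	struct foo *foo = xa_find(&array, &index, ULONG_MAX, XA_PRESENT);
 *
 * On success, @index has been updated to the index where 'foo' was found;
 * passing a mark such as XA_MARK_0 as the filter instead restricts the
 * search to marked entries.
 */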

/**
 * xa_find_after() - Search the XArray for a present entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter and has the lowest
 * index that is above @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may miss entries
 * which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The pointer, if found, otherwise %NULL.
 */
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
		unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp + 1);
	void *entry;

	rcu_read_lock();
	for (;;) {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);
		if (xas.xa_shift) {
			if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
				continue;
		} else {
			if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
				continue;
		}
		if (!xas_retry(&xas, entry))
			break;
	}
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find_after);
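
/*
 * Illustrative sketch, not part of the original source: xa_find() and
 * xa_find_after() combine into a walk over every present entry of a
 * hypothetical 'array' ('process' is a hypothetical helper):
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	entry = xa_find(&array, &index, ULONG_MAX, XA_PRESENT);
 *	while (entry) {
 *		process(index, entry);
 *		entry = xa_find_after(&array, &index, ULONG_MAX, XA_PRESENT);
 *	}
 *
 * The xa_for_each() iterator macro packages up this pattern.
 */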

/* Copy entries present in the range into @dst, up to @n of them. */
static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
		unsigned long max, unsigned int n)
{
	void *entry;
	unsigned int i = 0;

	rcu_read_lock();
	xas_for_each(xas, entry, max) {
		if (xas_retry(xas, entry))
			continue;
		dst[i++] = entry;
		if (i == n)
			break;
	}
	rcu_read_unlock();

	return i;
}

/* As above, but copy only entries which have @mark set. */
static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
		unsigned long max, unsigned int n, xa_mark_t mark)
{
	void *entry;
	unsigned int i = 0;

	rcu_read_lock();
	xas_for_each_marked(xas, entry, max, mark) {
		if (xas_retry(xas, entry))
			continue;
		dst[i++] = entry;
		if (i == n)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * xa_extract() - Copy selected entries from the XArray into a normal array.
 * @xa: The source XArray to copy from.
 * @dst: The buffer to copy entries into.
 * @start: The first index in the XArray eligible to be selected.
 * @max: The last index in the XArray eligible to be selected.
 * @n: The maximum number of entries to copy.
 * @filter: Selection criterion.
 *
 * Copies up to @n entries that match @filter from the XArray.  The
 * copied entries will have indices between @start and @max, inclusive.
 *
 * The @filter may be an XArray mark value, in which case entries which are
 * marked with that mark will be copied.  It may also be %XA_PRESENT, in
 * which case all entries which are not %NULL will be copied.
 *
 * The entries returned may not represent a snapshot of the XArray at a
 * moment in time.  For example, if another thread stores to index 5, then
 * index 10, calling xa_extract() may return the old contents of index 5
 * and the new contents of index 10.  Indices not modified while this
 * function is running will not be skipped.
 *
 * If you need stronger guarantees, holding the xa_lock across calls to this
 * function will prevent concurrent modification.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The number of entries copied.
 */
unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
		unsigned long max, unsigned int n, xa_mark_t filter)
{
	XA_STATE(xas, xa, start);

	if (!n)
		return 0;

	if ((__force unsigned int)filter < XA_MAX_MARKS)
		return xas_extract_marked(&xas, dst, max, n, filter);
	return xas_extract_present(&xas, dst, max, n);
}
EXPORT_SYMBOL(xa_extract);
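
/*
 * Illustrative sketch, not part of the original source: copying up to a
 * fixed-size batch of present entries out of a hypothetical 'array':
 *
 *	void *batch[16];
 *	unsigned int count;
 *
 *	count = xa_extract(&array, batch, 0, ULONG_MAX,
 *			ARRAY_SIZE(batch), XA_PRESENT);
 *
 * Note that only the entries are copied, not their indices; a caller
 * which needs the indices should iterate with xa_find()/xa_find_after()
 * instead.
 */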

/**
 * xa_destroy() - Free all internal data structures.
 * @xa: XArray.
 *
 * After calling this function, the XArray is empty and has freed all memory
 * allocated for its internal data structures.  You are responsible for
 * freeing the objects referenced by the XArray.
 *
 * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
 */
void xa_destroy(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long flags;
	void *entry;

	xas.xa_node = NULL;
	xas_lock_irqsave(&xas, flags);
	entry = xa_head_locked(xa);
	RCU_INIT_POINTER(xa->xa_head, NULL);
	xas_init_marks(&xas);
	/* lockdep checks we're still holding the lock in xas_free_nodes() */
	if (xa_is_node(entry))
		xas_free_nodes(&xas, xa_to_node(entry));
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(xa_destroy);
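
/*
 * Illustrative sketch, not part of the original source: xa_destroy()
 * frees only the XArray's internal nodes, so a caller which owns the
 * stored objects must free those first.  'array' and 'struct foo' are
 * hypothetical:
 *
 *	unsigned long index = 0;
 *	struct foo *foo;
 *
 *	foo = xa_find(&array, &index, ULONG_MAX, XA_PRESENT);
 *	while (foo) {
 *		kfree(foo);
 *		foo = xa_find_after(&array, &index, ULONG_MAX, XA_PRESENT);
 *	}
 *	xa_destroy(&array);
 */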

#ifdef XA_DEBUG
void xa_dump_node(const struct xa_node *node)
{
	unsigned i, j;

	if (!node)
		return;
	if ((unsigned long)node & 3) {
		/* Tagged pointer: not a valid node, just print the value. */
		pr_cont("node %px\n", node);
		return;
	}

	pr_cont("node %px %s %d parent %px shift %d count %d values %d "
		"array %px list %px %px marks",
		node, node->parent ? "offset" : "max", node->offset,
		node->parent, node->shift, node->count, node->nr_values,
		node->array, node->private_list.prev, node->private_list.next);
	for (i = 0; i < XA_MAX_MARKS; i++)
		for (j = 0; j < XA_MARK_LONGS; j++)
			pr_cont(" %lx", node->marks[i][j]);
	pr_cont("\n");
}

void xa_dump_index(unsigned long index, unsigned int shift)
{
	if (!shift)
		pr_info("%lu: ", index);
	else if (shift >= BITS_PER_LONG)
		pr_info("0-%lu: ", ~0UL);
	else
		pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
}

void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
{
	if (!entry)
		return;

	xa_dump_index(index, shift);

	if (xa_is_node(entry)) {
		if (shift == 0) {
			pr_cont("%px\n", entry);
		} else {
			unsigned long i;
			struct xa_node *node = xa_to_node(entry);
			xa_dump_node(node);
			for (i = 0; i < XA_CHUNK_SIZE; i++)
				xa_dump_entry(node->slots[i],
				      index + (i << node->shift), node->shift);
		}
	} else if (xa_is_value(entry))
		pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
						xa_to_value(entry), entry);
	else if (!xa_is_internal(entry))
		pr_cont("%px\n", entry);
	else if (xa_is_retry(entry))
		pr_cont("retry (%ld)\n", xa_to_internal(entry));
	else if (xa_is_sibling(entry))
		pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
	else if (xa_is_zero(entry))
		pr_cont("zero (%ld)\n", xa_to_internal(entry));
	else
		pr_cont("UNKNOWN ENTRY (%px)\n", entry);
}

void xa_dump(const struct xarray *xa)
{
	void *entry = xa->xa_head;
	unsigned int shift = 0;

	pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
			xa->xa_flags, xa_marked(xa, XA_MARK_0),
			xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
	if (xa_is_node(entry))
		shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
	xa_dump_entry(entry, 0, shift);
}
#endif