xarray.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * XArray implementation
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/*
 * Coding conventions in this file:
 *
 * @xa is used to refer to the entire xarray.
 * @xas is the 'xarray operation state'.  It may be either a pointer to
 * an xa_state, or an xa_state stored on the stack.  This is an unfortunate
 * ambiguity.
 * @index is the index of the entry being operated on
 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
 * @node refers to an xa_node; usually the primary one being operated on by
 * this function.
 * @offset is the index into the slots array inside an xa_node.
 * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
	return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_lock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_lock_bh(xas);
	else
		xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_unlock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_unlock_bh(xas);
	else
		xas_unlock(xas);
}

static inline bool xa_track_free(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
		unsigned int offset, xa_mark_t mark)
{
	return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
	bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set.  Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
	return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}

static void xas_advance(struct xa_state *xas)
{
	xas->xa_offset++;
	xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}

/*
 * Starts a walk.  If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to.  If we're in an
 * error state, return NULL.  If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}

static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	if (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
	}

	xas->xa_offset = offset;
	return entry;
}

/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index.  However, it will do nothing and return %NULL if
 * @xas is in an error state.  xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
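
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * advanced-API caller loads an entry under the RCU lock and retries if it
 * races with a node being freed.  "my_xa" and "index" are hypothetical.
 *
 *	XA_STATE(xas, &my_xa, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	do {
 *		entry = xas_load(&xas);
 *	} while (xas_retry(&xas, entry));
 *	rcu_read_unlock();
 */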
/* Move the radix tree node cache here */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

#define XA_RCU_FREE	((struct xarray *)1)

static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * This function is now internal-only.
 */
static void xas_destroy(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_alloc;

	if (!node)
		return;
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	kmem_cache_free(radix_tree_node_cachep, node);
	xas->xa_alloc = NULL;
}

/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue.  The caller
 * should drop the lock and call xas_nomem().  If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc().  More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
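
/*
 * Example (illustrative sketch): the canonical retry loop described above.
 * The store is attempted with the lock held; if it fails for lack of
 * memory, xas_nomem() allocates with the caller's gfp flags and the loop
 * repeats.  "my_xa", "index" and "item" are hypothetical.
 *
 *	XA_STATE(xas, &my_xa, index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, item);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 *	return xas_error(&xas);
 */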
/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (gfpflags_allow_blocking(gfp)) {
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
	if (xas->xa_update)
		xas->xa_update(node);
	else
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		xas->xa_alloc = NULL;
	} else {
		node = kmem_cache_alloc(radix_tree_node_cachep,
					GFP_NOWAIT | __GFP_NOWARN);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}

#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
	return (xas->xa_sibs + 1UL) << xas->xa_shift;
}
#endif

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas.  Because we cannot store a
 * multiple-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask = xas_size(xas) - 1;
		max |= mask;
		if (mask == max)
			max++;
	}
#endif

	return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
	if (!xa_is_node(entry))
		return 0;
	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		if (!xa_is_node(entry) && node->shift)
			break;
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);

		node->count = 0;
		node->nr_values = 0;
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}
/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if @xas->xa_node
 * has a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	if (!node->parent)
		xas_shrink(xas);
}
/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree.  We must now free it and all
 * of its subnodes.  There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (xa_is_node(entry)) {
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
				node_mark_all(node, XA_FREE_MARK);
				if (!xa_marked(xa, XA_FREE_MARK)) {
					node_clear_mark(node, 0, XA_FREE_MARK);
					xa_mark_set(xa, XA_FREE_MARK);
				}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
			}
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store().  It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns %NULL.  If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}

/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas.  Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range.  This is for the benefit of users which have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
	unsigned long index = xas->xa_index;
	unsigned char shift = xas->xa_shift;
	unsigned char sibs = xas->xa_sibs;

	xas->xa_index |= ((sibs + 1) << shift) - 1;
	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
		xas->xa_offset |= sibs;
	xas->xa_shift = 0;
	xas->xa_sibs = 0;

	for (;;) {
		xas_create(xas);
		if (xas_error(xas))
			goto restore;
		if (xas->xa_index <= (index | XA_CHUNK_MASK))
			goto success;
		xas->xa_index -= XA_CHUNK_SIZE;

		for (;;) {
			struct xa_node *node = xas->xa_node;
			xas->xa_node = xa_parent_locked(xas->xa, node);
			xas->xa_offset = node->offset - 1;
			if (node->offset != 0)
				break;
		}
	}

restore:
	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
	xas->xa_index = index;
	return;
success:
	xas->xa_index = index;
	if (xas->xa_node)
		xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);
static void update_node(struct xa_state *xas, struct xa_node *node,
		int count, int values)
{
	if (!node || (!count && !values))
		return;

	node->count += count;
	node->nr_values += values;
	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
	xas_update(xas, node);
	if (count < 0)
		xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range).  This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;
	int values = 0;
	void *first, *next;
	bool value = xa_is_value(entry);

	if (entry)
		first = xas_create(xas);
	else
		first = xas_load(xas);

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early.  rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);
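
/*
 * Example (illustrative sketch): erasing with the advanced API is just a
 * store of %NULL.  No memory allocation is needed for that, so no retry
 * loop is required.  "my_xa" and "index" are hypothetical.
 *
 *	XA_STATE(xas, &my_xa, index);
 *	void *old;
 *
 *	xas_lock(&xas);
 *	old = xas_store(&xas, NULL);
 *	xas_unlock(&xas);
 */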
/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Return: true if the mark is set, false if the mark is clear or @xas
 * is in an error state.
 */
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
	if (xas_invalid(xas))
		return false;
	if (!xas->xa_node)
		return xa_marked(xas->xa, mark);
	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);

/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries.  Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (node_set_mark(node, offset, mark))
			return;
		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (!xa_marked(xas->xa, mark))
		xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);

/**
 * xas_clear_mark() - Clears the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Clears the specified mark on this entry, and walks back to the head
 * attempting to clear it on all the ancestor entries.  Does nothing if
 * @xas has not been walked to an entry, or is in an error state.
 */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (!node_clear_mark(node, offset, mark))
			return;
		if (node_any_mark(node, mark))
			return;

		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (xa_marked(xas->xa, mark))
		xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);

/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas.  If we're tracking
 * free entries with a mark, we need to set it on all entries.  All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
	xa_mark_t mark = 0;

	for (;;) {
		if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
			xas_set_mark(xas, mark);
		else
			xas_clear_mark(xas, mark);
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}
EXPORT_SYMBOL_GPL(xas_init_marks);
/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry.  Those users should call this function before they drop
 * the lock.  It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.  If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration.  If a user needs
 * to pause a reverse iteration, we will need an xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (xas_invalid(xas))
		return;

	if (node) {
		unsigned int offset = xas->xa_offset;
		while (++offset < XA_CHUNK_SIZE) {
			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
				break;
		}
		xas->xa_index += (offset - xas->xa_offset) << node->shift;
	} else {
		xas->xa_index++;
	}
	xas->xa_node = XAS_RESTART;
}
EXPORT_SYMBOL_GPL(xas_pause);
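
/*
 * Example (illustrative sketch): dropping the RCU lock in the middle of a
 * long iteration.  After xas_pause(), the walk restarts from the next
 * index once the lock is retaken.  "my_xa" and process_entry() are
 * hypothetical.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		process_entry(entry);
 *		if (need_resched()) {
 *			xas_pause(&xas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *	}
 *	rcu_read_unlock();
 */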
/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index--;
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset--;

	/*
	 * xa_offset is an unsigned char, so 255 means we stepped back off
	 * the start of this node and must ascend to the parent.
	 */
	while (xas->xa_offset == 255) {
		xas->xa_offset = xas->xa_node->offset - 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_prev);
/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index++;
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset++;

	while (xas->xa_offset == XA_CHUNK_SIZE) {
		xas->xa_offset = xas->xa_node->offset + 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_next);
/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array.  This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		return set_bounds(xas);
	} else if (xas_top(xas->xa_node)) {
		entry = xas_load(xas);
		if (entry || xas_not_node(xas->xa_node))
			return entry;
	} else if (!xas->xa_node->shift &&
		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
	}

	xas_advance(xas);

	while (xas->xa_node && (xas->xa_index <= max)) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (xa_is_node(entry)) {
			xas->xa_node = xa_to_node(entry);
			xas->xa_offset = 0;
			continue;
		}
		if (entry && !xa_is_sibling(entry))
			return entry;

		xas_advance(xas);
	}

	if (!xas->xa_node)
		xas->xa_node = XAS_BOUNDS;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
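
/*
 * Example (illustrative sketch): xas_find() continues from the current
 * position, so calling it in a loop visits each present entry once; this
 * is essentially what the iteration macros are built from.  "my_xa" and
 * process() are hypothetical; each entry found is at xas.xa_index.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	for (entry = xas_find(&xas, ULONG_MAX); entry;
 *	     entry = xas_find(&xas, ULONG_MAX))
 *		process(entry);
 *	rcu_read_unlock();
 */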
/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array.  This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
	bool advance = true;
	unsigned int offset;
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		goto out;
	} else if (xas_top(xas->xa_node)) {
		advance = false;
		entry = xa_head(xas->xa);
		xas->xa_node = NULL;
		if (xas->xa_index > max_index(entry))
			goto bounds;
		if (!xa_is_node(entry)) {
			if (xa_marked(xas->xa, mark))
				return entry;
			xas->xa_index = 1;
			goto out;
		}
		xas->xa_node = xa_to_node(entry);
		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
	}

	while (xas->xa_index <= max) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			advance = false;
			continue;
		}

		if (!advance) {
			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
			if (xa_is_sibling(entry)) {
				xas->xa_offset = xa_to_sibling(entry);
				xas_move_index(xas, xas->xa_offset);
			}
		}

		offset = xas_find_chunk(xas, advance, mark);
		if (offset > xas->xa_offset) {
			advance = false;
			xas_move_index(xas, offset);
			/* Mind the wrap */
			if ((xas->xa_index - 1) >= max)
				goto max;
			xas->xa_offset = offset;
			if (offset == XA_CHUNK_SIZE)
				continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}

out:
	if (!max)
		goto max;
bounds:
	xas->xa_node = XAS_BOUNDS;
	return NULL;
max:
	xas->xa_node = XAS_RESTART;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);

/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context.  Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
	void *curr;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node)
		return NULL;

	if (xas_top(xas->xa_node)) {
		curr = xas_start(xas);
		if (!curr)
			return NULL;
		while (xa_is_node(curr)) {
			struct xa_node *node = xa_to_node(curr);
			curr = xas_descend(xas, node);
		}
		if (curr)
			return curr;
	}

	if (xas->xa_node->shift > xas->xa_shift)
		return NULL;

	for (;;) {
		if (xas->xa_node->shift == xas->xa_shift) {
			if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
				break;
		} else if (xas->xa_offset == XA_CHUNK_MASK) {
			xas->xa_offset = xas->xa_node->offset;
			xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			continue;
		}
		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
		if (xa_is_sibling(curr))
			continue;
		while (xa_is_node(curr)) {
			xas->xa_node = xa_to_node(curr);
			xas->xa_offset = 0;
			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
		}
		if (curr)
			return curr;
	}
	xas->xa_offset -= xas->xa_sibs;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);
/**
 * xa_init_flags() - Initialise an empty XArray with flags.
 * @xa: XArray.
 * @flags: XA_FLAG values.
 *
 * If you need to initialise an XArray with special flags (eg you need
 * to take the lock from interrupt context), use this function instead
 * of xa_init().
 *
 * Context: Any context.
 */
void xa_init_flags(struct xarray *xa, gfp_t flags)
{
	unsigned int lock_type;
	static struct lock_class_key xa_lock_irq;
	static struct lock_class_key xa_lock_bh;

	spin_lock_init(&xa->xa_lock);
	xa->xa_flags = flags;
	xa->xa_head = NULL;

	lock_type = xa_lock_type(xa);
	if (lock_type == XA_LOCK_IRQ)
		lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
	else if (lock_type == XA_LOCK_BH)
		lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
}
EXPORT_SYMBOL(xa_init_flags);

/**
 * xa_load() - Load an entry from an XArray.
 * @xa: XArray.
 * @index: index into array.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry at @index in @xa.
 */
void *xa_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))
			entry = NULL;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}
EXPORT_SYMBOL(xa_load);
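
/*
 * Example (illustrative sketch): the normal API requires no locking by
 * the caller.  "my_xa" and "index" are hypothetical.
 *
 *	DEFINE_XARRAY(my_xa);
 *
 *	void *p = xa_load(&my_xa, index);
 *	if (!p)
 *		return -ENOENT;		(nothing stored at index)
 */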
static void *xas_result(struct xa_state *xas, void *curr)
{
	if (xa_is_zero(curr))
		return NULL;
	XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
	if (xas_error(xas))
		curr = xas->xa_node;
	return curr;
}
/**
 * __xa_erase() - Erase this entry from the XArray while locked.
 * @xa: XArray.
 * @index: Index into array.
 *
 * If the entry at this index is a multi-index entry then all indices will
 * be erased, and the entry will no longer be a multi-index entry.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  The lock is
 * never dropped: storing %NULL requires no memory allocation.
 * Return: The old entry at this index.
 */
void *__xa_erase(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL(__xa_erase);
/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * This function is the equivalent of calling xa_store() with %NULL as
 * the third argument.  The XArray does not need to allocate memory, so
 * the user does not need to provide GFP flags.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
void *xa_erase(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock(xa);
	entry = __xa_erase(xa, index);
	xa_unlock(xa);

	return entry;
}
EXPORT_SYMBOL(xa_erase);

/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);
	if (xa_track_free(xa) && !entry)
		entry = XA_ZERO_ENTRY;

	do {
		curr = xas_store(&xas, entry);
		if (xa_track_free(xa))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);
/**
 * xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
 * failed.
 */
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock(xa);

	return curr;
}
EXPORT_SYMBOL(xa_store);
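
/*
 * Example (illustrative sketch): storing and checking for failure with
 * the normal API.  xa_err() converts the returned error entry into
 * -EINVAL or -ENOMEM.  "my_xa", "index" and "item" are hypothetical.
 *
 *	void *old = xa_store(&my_xa, index, item, GFP_KERNEL);
 *	if (xa_is_err(old))
 *		return xa_err(old);
 */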
/**
 * __xa_cmpxchg() - Conditionally store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);
	if (xa_track_free(xa) && !entry)
		entry = XA_ZERO_ENTRY;

	do {
		curr = xas_load(&xas);
		if (curr == XA_ZERO_ENTRY)
			curr = NULL;
		if (curr == old) {
			xas_store(&xas, entry);
			if (xa_track_free(xa))
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);
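
/*
 * Example (illustrative sketch): insert-if-absent using the locked
 * xa_cmpxchg() wrapper from xarray.h.  If another thread stored first,
 * the existing entry is returned.  "my_xa", "index" and "item" are
 * hypothetical.
 *
 *	void *old = xa_cmpxchg(&my_xa, index, NULL, item, GFP_KERNEL);
 *	if (xa_is_err(old))
 *		return xa_err(old);
 *	if (old)
 *		kfree(item);	(lost the race; old is the existing entry)
 */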
/**
 * __xa_reserve() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * Ensures there is somewhere to store an entry at @index in the array.
 * If there is already something stored at @index, this function does
 * nothing.  If there was nothing there, the entry is marked as reserved.
 * Loading from a reserved entry returns a %NULL pointer.
 *
 * If you do not use the entry that you have reserved, call xa_release()
 * or xa_erase() to free any unnecessary memory.
 *
 * Context: Any context.  Expects the xa_lock to be held on entry.  May
 * release the lock, sleep and reacquire the lock if the @gfp flags permit.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	do {
		curr = xas_load(&xas);
		if (!curr) {
			xas_store(&xas, XA_ZERO_ENTRY);
			if (xa_track_free(xa))
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
	} while (__xas_nomem(&xas, gfp));

	return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_reserve);

#ifdef CONFIG_XARRAY_MULTI
static void xas_set_range(struct xa_state *xas, unsigned long first,
		unsigned long last)
{
	unsigned int shift = 0;
	unsigned long sibs = last - first;
	unsigned int offset = XA_CHUNK_MASK;

	xas_set(xas, first);

	while ((first & XA_CHUNK_MASK) == 0) {
		if (sibs < XA_CHUNK_MASK)
			break;
		if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
			break;
		shift += XA_CHUNK_SHIFT;
		if (offset == XA_CHUNK_MASK)
			offset = sibs & XA_CHUNK_MASK;
		sibs >>= XA_CHUNK_SHIFT;
		first >>= XA_CHUNK_SHIFT;
	}

	offset = first & XA_CHUNK_MASK;
	if (offset + sibs > XA_CHUNK_MASK)
		sibs = XA_CHUNK_MASK - offset;
	if ((((first + sibs + 1) << shift) - 1) > last)
		sibs -= 1;

	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
}
/**
 * xa_store_range() - Store this entry at a range of indices in the XArray.
 * @xa: XArray.
 * @first: First index to affect.
 * @last: Last index to affect.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from any index between @first and
 * @last, inclusive, will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with the range are unaffected unless @entry is %NULL.
 *
 * Context: Process context.  Takes and releases the xa_lock.  May sleep
 * if the @gfp flags permit.
 * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
 * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
 */
void *xa_store_range(struct xarray *xa, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);
	if (last < first)
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		if (entry) {
			unsigned int order = (last == ~0UL) ? 64 :
						ilog2(last + 1);
			xas_set_order(&xas, last, order);
			xas_create(&xas);
			if (xas_error(&xas))
				goto unlock;
		}
		do {
			xas_set_range(&xas, first, last);
			xas_store(&xas, entry);
			if (xas_error(&xas))
				goto unlock;
			first += xas_size(&xas);
		} while (first <= last);
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);
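
/*
 * Example (illustrative sketch): after storing one entry over a range,
 * every index in that range loads the same pointer.  "my_xa" and "item"
 * are hypothetical.
 *
 *	xa_store_range(&my_xa, 0, 15, item, GFP_KERNEL);
 *	(xa_load(&my_xa, 7) now returns item)
 */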

#endif /* CONFIG_XARRAY_MULTI */
  1412. /**
  1413. * __xa_alloc() - Find somewhere to store this entry in the XArray.
  1414. * @xa: XArray.
  1415. * @id: Pointer to ID.
  1416. * @max: Maximum ID to allocate (inclusive).
  1417. * @entry: New entry.
  1418. * @gfp: Memory allocation flags.
  1419. *
  1420. * Allocates an unused ID in the range specified by @id and @max.
  1421. * Updates the @id pointer with the index, then stores the entry at that
  1422. * index. A concurrent lookup will not see an uninitialised @id.
  1423. *
  1424. * Context: Any context. Expects xa_lock to be held on entry. May
  1425. * release and reacquire xa_lock if @gfp flags permit.
  1426. * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
  1427. * there is no more space in the XArray.
  1428. */
int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);
	int err;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return -EINVAL;
	if (WARN_ON_ONCE(!xa_track_free(xa)))
		return -EINVAL;

	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		xas.xa_index = *id;
		xas_find_marked(&xas, max, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -ENOSPC);
		xas_store(&xas, entry);
		xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	err = xas_error(&xas);
	if (!err)
		*id = xas.xa_index;
	return err;
}
EXPORT_SYMBOL(__xa_alloc);
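
/*
 * Example (illustrative sketch, not from the original source; 'my_ids' and
 * 'thing' are hypothetical).  The array must track free entries, e.g. by
 * being defined with DEFINE_XARRAY_ALLOC:
 *
 *	DEFINE_XARRAY_ALLOC(my_ids);
 *
 *	u32 id = 0;
 *	int err;
 *
 *	xa_lock(&my_ids);
 *	err = __xa_alloc(&my_ids, &id, UINT_MAX, thing, GFP_KERNEL);
 *	xa_unlock(&my_ids);
 *
 * GFP_KERNEL is safe here because __xa_alloc() drops and retakes the lock
 * around any allocation, as documented above; callers who do not already
 * hold the lock would normally use the xa_alloc() wrapper instead.
 */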

/**
 * __xa_set_mark() - Set this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Any context. Expects xa_lock to be held on entry.
 */
void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry = xas_load(&xas);

	if (entry)
		xas_set_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_set_mark);

/**
 * __xa_clear_mark() - Clear this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Context: Any context. Expects xa_lock to be held on entry.
 */
void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry = xas_load(&xas);

	if (entry)
		xas_clear_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_clear_mark);
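
/*
 * Example (illustrative sketch, not from the original source; 'my_xa' and
 * 'index' are hypothetical).  The locked variants are for callers who need
 * the mark to change atomically with other updates made under the same
 * lock:
 *
 *	xa_lock(&my_xa);
 *	__xa_set_mark(&my_xa, index, XA_MARK_1);
 *	__xa_clear_mark(&my_xa, index, XA_MARK_0);
 *	xa_unlock(&my_xa);
 */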

/**
 * xa_get_mark() - Inquire whether this mark is set on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * This function uses the RCU read lock, so the result may be out of date
 * by the time it returns. If you need the result to be stable, use a lock.
 *
 * Context: Any context. Takes and releases the RCU lock.
 * Return: True if the entry at @index has this mark set, false if it doesn't.
 */
bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	entry = xas_start(&xas);
	while (xas_get_mark(&xas, mark)) {
		if (!xa_is_node(entry))
			goto found;
		entry = xas_descend(&xas, xa_to_node(entry));
	}
	rcu_read_unlock();
	return false;
found:
	rcu_read_unlock();
	return true;
}
EXPORT_SYMBOL(xa_get_mark);

/**
 * xa_set_mark() - Set this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Process context. Takes and releases the xa_lock.
 */
void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_set_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_set_mark);

/**
 * xa_clear_mark() - Clear this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Clearing a mark always succeeds.
 *
 * Context: Process context. Takes and releases the xa_lock.
 */
void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_clear_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_clear_mark);
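
/*
 * Example (illustrative sketch, not from the original source; 'my_xa' and
 * 'ptr' are hypothetical).  Marks only stick to non-NULL entries, so store
 * first, then mark:
 *
 *	xa_store(&my_xa, 7, ptr, GFP_KERNEL);
 *	xa_set_mark(&my_xa, 7, XA_MARK_0);
 *
 *	if (xa_get_mark(&my_xa, 7, XA_MARK_0))
 *		;	// set, though possibly stale by the time we look
 *
 *	xa_clear_mark(&my_xa, 7, XA_MARK_0);
 *
 * Storing %NULL at an index (e.g. with xa_erase()) also clears its marks.
 */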

/**
 * xa_find() - Search the XArray for an entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter, and has the lowest
 * index that is at least @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may not find
 * entries which are being simultaneously added. It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context. Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find(struct xarray *xa, unsigned long *indexp,
		unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find);
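
/*
 * Example (illustrative sketch, not from the original source; 'my_xa' is
 * hypothetical).  Find the first entry marked with XA_MARK_0 at or above
 * index 100:
 *
 *	unsigned long index = 100;
 *	void *entry = xa_find(&my_xa, &index, ULONG_MAX, XA_MARK_0);
 *
 * If 'entry' is non-NULL, 'index' has been updated to its index.  Pass
 * XA_PRESENT as the filter to accept any non-NULL entry.
 */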

/**
 * xa_find_after() - Search the XArray for a present entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter and has the lowest
 * index that is above @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may miss entries
 * which are being simultaneously added. It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context. Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
		unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp + 1);
	void *entry;

	rcu_read_lock();
	for (;;) {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);
		if (xas.xa_node == XAS_BOUNDS)
			break;
		if (xas.xa_shift) {
			if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
				continue;
		} else {
			if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
				continue;
		}
		if (!xas_retry(&xas, entry))
			break;
	}
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find_after);
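
/*
 * Example (illustrative sketch, not from the original source; 'my_xa' is
 * hypothetical).  Because xa_find_after() searches strictly above *@indexp,
 * pairing it with xa_find() gives a resumable cursor over all present
 * entries:
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	for (entry = xa_find(&my_xa, &index, ULONG_MAX, XA_PRESENT);
 *	     entry;
 *	     entry = xa_find_after(&my_xa, &index, ULONG_MAX, XA_PRESENT)) {
 *		// process 'entry', which lives at 'index'
 *	}
 */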

static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
		unsigned long max, unsigned int n)
{
	void *entry;
	unsigned int i = 0;

	rcu_read_lock();
	xas_for_each(xas, entry, max) {
		if (xas_retry(xas, entry))
			continue;
		dst[i++] = entry;
		if (i == n)
			break;
	}
	rcu_read_unlock();

	return i;
}

static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
		unsigned long max, unsigned int n, xa_mark_t mark)
{
	void *entry;
	unsigned int i = 0;

	rcu_read_lock();
	xas_for_each_marked(xas, entry, max, mark) {
		if (xas_retry(xas, entry))
			continue;
		dst[i++] = entry;
		if (i == n)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * xa_extract() - Copy selected entries from the XArray into a normal array.
 * @xa: The source XArray to copy from.
 * @dst: The buffer to copy entries into.
 * @start: The first index in the XArray eligible to be selected.
 * @max: The last index in the XArray eligible to be selected.
 * @n: The maximum number of entries to copy.
 * @filter: Selection criterion.
 *
 * Copies up to @n entries that match @filter from the XArray. The
 * copied entries will have indices between @start and @max, inclusive.
 *
 * The @filter may be an XArray mark value, in which case entries which are
 * marked with that mark will be copied. It may also be %XA_PRESENT, in
 * which case all entries which are not %NULL will be copied.
 *
 * The entries returned may not represent a snapshot of the XArray at a
 * moment in time. For example, if another thread stores to index 5, then
 * index 10, calling xa_extract() may return the old contents of index 5
 * and the new contents of index 10. Indices not modified while this
 * function is running will not be skipped.
 *
 * If you need stronger guarantees, holding the xa_lock across calls to this
 * function will prevent concurrent modification.
 *
 * Context: Any context. Takes and releases the RCU lock.
 * Return: The number of entries copied.
 */
unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
			unsigned long max, unsigned int n, xa_mark_t filter)
{
	XA_STATE(xas, xa, start);

	if (!n)
		return 0;

	if ((__force unsigned int)filter < XA_MAX_MARKS)
		return xas_extract_marked(&xas, dst, max, n, filter);
	return xas_extract_present(&xas, dst, max, n);
}
EXPORT_SYMBOL(xa_extract);
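
/*
 * Example (illustrative sketch, not from the original source; 'my_xa' is
 * hypothetical).  Copy up to 16 present entries with indices 0-99 into a
 * local buffer:
 *
 *	void *batch[16];
 *	unsigned int count;
 *
 *	count = xa_extract(&my_xa, batch, 0, 99, ARRAY_SIZE(batch),
 *			   XA_PRESENT);
 *
 * 'count' slots of 'batch' are now filled; as documented above, the copies
 * are only a consistent snapshot if the caller holds the xa_lock.
 */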

/**
 * xa_destroy() - Free all internal data structures.
 * @xa: XArray.
 *
 * After calling this function, the XArray is empty and has freed all memory
 * allocated for its internal data structures. You are responsible for
 * freeing the objects referenced by the XArray.
 *
 * Context: Any context. Takes and releases the xa_lock, interrupt-safe.
 */
void xa_destroy(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long flags;
	void *entry;

	xas.xa_node = NULL;
	xas_lock_irqsave(&xas, flags);
	entry = xa_head_locked(xa);
	RCU_INIT_POINTER(xa->xa_head, NULL);
	xas_init_marks(&xas);
	/* lockdep checks we're still holding the lock in xas_free_nodes() */
	if (xa_is_node(entry))
		xas_free_nodes(&xas, xa_to_node(entry));
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(xa_destroy);
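
/*
 * Example (illustrative teardown sketch, not from the original source;
 * 'my_xa' is hypothetical, its entries are assumed to be kmalloc'd, and no
 * other users may remain).  xa_destroy() does not free what the entries
 * point to, so free those first:
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	for (entry = xa_find(&my_xa, &index, ULONG_MAX, XA_PRESENT);
 *	     entry;
 *	     entry = xa_find_after(&my_xa, &index, ULONG_MAX, XA_PRESENT))
 *		kfree(entry);
 *	xa_destroy(&my_xa);
 */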

#ifdef XA_DEBUG
void xa_dump_node(const struct xa_node *node)
{
	unsigned i, j;

	if (!node)
		return;
	if ((unsigned long)node & 3) {
		pr_cont("node %px\n", node);
		return;
	}

	pr_cont("node %px %s %d parent %px shift %d count %d values %d "
		"array %px list %px %px marks",
		node, node->parent ? "offset" : "max", node->offset,
		node->parent, node->shift, node->count, node->nr_values,
		node->array, node->private_list.prev, node->private_list.next);
	for (i = 0; i < XA_MAX_MARKS; i++)
		for (j = 0; j < XA_MARK_LONGS; j++)
			pr_cont(" %lx", node->marks[i][j]);
	pr_cont("\n");
}

void xa_dump_index(unsigned long index, unsigned int shift)
{
	if (!shift)
		pr_info("%lu: ", index);
	else if (shift >= BITS_PER_LONG)
		pr_info("0-%lu: ", ~0UL);
	else
		pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
}

void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
{
	if (!entry)
		return;

	xa_dump_index(index, shift);

	if (xa_is_node(entry)) {
		if (shift == 0) {
			pr_cont("%px\n", entry);
		} else {
			unsigned long i;
			struct xa_node *node = xa_to_node(entry);

			xa_dump_node(node);
			for (i = 0; i < XA_CHUNK_SIZE; i++)
				xa_dump_entry(node->slots[i],
				      index + (i << node->shift), node->shift);
		}
	} else if (xa_is_value(entry))
		pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
						xa_to_value(entry), entry);
	else if (!xa_is_internal(entry))
		pr_cont("%px\n", entry);
	else if (xa_is_retry(entry))
		pr_cont("retry (%ld)\n", xa_to_internal(entry));
	else if (xa_is_sibling(entry))
		pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
	else if (xa_is_zero(entry))
		pr_cont("zero (%ld)\n", xa_to_internal(entry));
	else
		pr_cont("UNKNOWN ENTRY (%px)\n", entry);
}

void xa_dump(const struct xarray *xa)
{
	void *entry = xa->xa_head;
	unsigned int shift = 0;

	pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
			xa->xa_flags, xa_marked(xa, XA_MARK_0),
			xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
	if (xa_is_node(entry))
		shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
	xa_dump_entry(entry, 0, shift);
}
#endif