// SPDX-License-Identifier: GPL-2.0+
/*
 * XArray implementation
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/*
 * Coding conventions in this file:
 *
 * @xa is used to refer to the entire xarray.
 * @xas is the 'xarray operation state'. It may be either a pointer to
 * an xa_state, or an xa_state stored on the stack. This is an unfortunate
 * ambiguity.
 * @index is the index of the entry being operated on
 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
 * @node refers to an xa_node; usually the primary one being operated on by
 * this function.
 * @offset is the index into the slots array inside an xa_node.
 * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
	return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_lock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_lock_bh(xas);
	else
		xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_unlock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_unlock_bh(xas);
	else
		xas_unlock(xas);
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
		unsigned int offset, xa_mark_t mark)
{
	return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set. Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
	return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}

static void xas_advance(struct xa_state *xas)
{
	xas->xa_offset++;
	xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}

/*
 * Starts a walk. If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to. If we're in an
 * error state, return NULL. If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node. Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}

static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	if (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
	}

	xas->xa_offset = offset;
	return entry;
}

/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index. However, it will do nothing and return %NULL if
 * @xas is in an error state. xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context. The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
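
/*
 * Illustrative sketch (not part of this file): a typical raw use of
 * xas_load() under the RCU read lock, retrying if a retry entry is seen.
 * The xarray "xa" and "index" are assumptions for the example.
 *
 *	XA_STATE(xas, &xa, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	do {
 *		entry = xas_load(&xas);
 *	} while (xas_retry(&xas, entry));
 *	rcu_read_unlock();
 *
 * This mirrors what xa_load() below does for the common case.
 */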

/* Move the radix tree node cache here */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

#define XA_RCU_FREE	((struct xarray *)1)

static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * This function is now internal-only.
 */
static void xas_destroy(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_alloc;

	if (!node)
		return;
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	kmem_cache_free(radix_tree_node_cachep, node);
	xas->xa_alloc = NULL;
}

/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue. The caller
 * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc(). More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
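
/*
 * Illustrative sketch (an assumption, not part of this file): the
 * drop-lock-and-retry pattern described above, as a caller of the
 * advanced API might write it. "xa", "index" and "item" are
 * hypothetical names.
 *
 *	XA_STATE(xas, &xa, index);
 *	void *curr;
 *
 *	do {
 *		xas_lock(&xas);
 *		curr = xas_store(&xas, item);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 *
 * xa_store() below is the normal-API wrapper around exactly this loop.
 */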

/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (gfpflags_allow_blocking(gfp)) {
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
	if (xas->xa_update)
		xas->xa_update(node);
	else
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		xas->xa_alloc = NULL;
	} else {
		node = kmem_cache_alloc(radix_tree_node_cachep,
					GFP_NOWAIT | __GFP_NOWARN);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas. Because we cannot store a
 * multiple-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask;
		mask = (((xas->xa_sibs + 1UL) << xas->xa_shift) - 1);
		max |= mask;
		if (mask == max)
			max++;
	}
#endif

	return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
	if (!xa_is_node(entry))
		return 0;
	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		if (!xa_is_node(entry) && node->shift)
			break;
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);

		node->count = 0;
		node->nr_values = 0;
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}

/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node. This will fail if the node has
 * a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	if (!node->parent)
		xas_shrink(xas);
}

/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree. We must now free it and all
 * of its subnodes. There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (xa_is_node(entry)) {
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_marked(xa, mark))
				node_set_mark(node, 0, mark);
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store(). It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns NULL. If it failed to create the
 * slot, returns NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}

static void update_node(struct xa_state *xas, struct xa_node *node,
		int count, int values)
{
	if (!node || (!count && !values))
		return;

	node->count += count;
	node->nr_values += values;
	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
	xas_update(xas, node);
	if (count < 0)
		xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range). This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;
	int values = 0;
	void *first, *next;
	bool value = xa_is_value(entry);

	if (entry)
		first = xas_create(xas);
	else
		first = xas_load(xas);

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early. rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);
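
/*
 * Illustrative sketch (an assumption): the advanced-API way to erase an
 * entry while already holding the lock; storing NULL also re-initialises
 * the marks, as the code above shows. "xa" and "index" are hypothetical.
 *
 *	XA_STATE(xas, &xa, index);
 *	void *old;
 *
 *	xas_lock(&xas);
 *	old = xas_store(&xas, NULL);
 *	xas_unlock(&xas);
 *
 * __xa_erase() below is this pattern without the locking.
 */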

/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Return: true if the mark is set, false if the mark is clear or @xas
 * is in an error state.
 */
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
	if (xas_invalid(xas))
		return false;
	if (!xas->xa_node)
		return xa_marked(xas->xa, mark);
	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);

/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries. Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (node_set_mark(node, offset, mark))
			return;
		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (!xa_marked(xas->xa, mark))
		xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);

/**
 * xas_clear_mark() - Clears the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Clears the specified mark on this entry, and walks back to the head
 * attempting to clear it on all the ancestor entries. Does nothing if
 * @xas has not been walked to an entry, or is in an error state.
 */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (!node_clear_mark(node, offset, mark))
			return;
		if (node_any_mark(node, mark))
			return;

		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (xa_marked(xas->xa, mark))
		xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);

/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas. If we're tracking
 * free entries with a mark, we need to set it on all entries. All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
	xa_mark_t mark = 0;

	for (;;) {
		xas_clear_mark(xas, mark);
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}
EXPORT_SYMBOL_GPL(xas_init_marks);

/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry. Those users should call this function before they drop
 * the lock. It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock. If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration. If a user needs
 * to pause a reverse iteration, we will need a xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (xas_invalid(xas))
		return;

	if (node) {
		unsigned int offset = xas->xa_offset;
		while (++offset < XA_CHUNK_SIZE) {
			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
				break;
		}
		xas->xa_index += (offset - xas->xa_offset) << node->shift;
	} else {
		xas->xa_index++;
	}
	xas->xa_node = XAS_RESTART;
}
EXPORT_SYMBOL_GPL(xas_pause);
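
/*
 * Illustrative sketch (an assumption, not from this file): pausing a
 * locked iteration so the lock can be dropped between batches. "xa",
 * "process()" and the batch size are hypothetical; xas_for_each() is
 * the iterator from <linux/xarray.h>.
 *
 *	XA_STATE(xas, &xa, 0);
 *	void *entry;
 *	unsigned int batch = 0;
 *
 *	xas_lock(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		process(entry);
 *		if (++batch % 16 == 0) {
 *			xas_pause(&xas);
 *			xas_unlock(&xas);
 *			cond_resched();
 *			xas_lock(&xas);
 *		}
 *	}
 *	xas_unlock(&xas);
 */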

/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index. If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array. This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		return set_bounds(xas);
	} else if (xas_top(xas->xa_node)) {
		entry = xas_load(xas);
		if (entry || xas_not_node(xas->xa_node))
			return entry;
	} else if (!xas->xa_node->shift &&
		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
	}

	xas_advance(xas);

	while (xas->xa_node && (xas->xa_index <= max)) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (xa_is_node(entry)) {
			xas->xa_node = xa_to_node(entry);
			xas->xa_offset = 0;
			continue;
		}
		if (entry && !xa_is_sibling(entry))
			return entry;

		xas_advance(xas);
	}

	if (!xas->xa_node)
		xas->xa_node = XAS_BOUNDS;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
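
/*
 * Illustrative sketch (an assumption): a raw xas_find() loop under the
 * RCU read lock, skipping retry entries by hand. "xa" and "handle()"
 * are hypothetical; most callers would use xas_for_each() instead.
 *
 *	XA_STATE(xas, &xa, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	for (entry = xas_find(&xas, ULONG_MAX); entry;
 *	     entry = xas_find(&xas, ULONG_MAX)) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		handle(entry);
 *	}
 *	rcu_read_unlock();
 */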

/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index. If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array. This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
	bool advance = true;
	unsigned int offset;
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		goto out;
	} else if (xas_top(xas->xa_node)) {
		advance = false;
		entry = xa_head(xas->xa);
		xas->xa_node = NULL;
		if (xas->xa_index > max_index(entry))
			goto bounds;
		if (!xa_is_node(entry)) {
			if (xa_marked(xas->xa, mark))
				return entry;
			xas->xa_index = 1;
			goto out;
		}
		xas->xa_node = xa_to_node(entry);
		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
	}

	while (xas->xa_index <= max) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			advance = false;
			continue;
		}

		if (!advance) {
			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
			if (xa_is_sibling(entry)) {
				xas->xa_offset = xa_to_sibling(entry);
				xas_move_index(xas, xas->xa_offset);
			}
		}

		offset = xas_find_chunk(xas, advance, mark);
		if (offset > xas->xa_offset) {
			advance = false;
			xas_move_index(xas, offset);
			/* Mind the wrap */
			if ((xas->xa_index - 1) >= max)
				goto max;
			xas->xa_offset = offset;
			if (offset == XA_CHUNK_SIZE)
				continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}

out:
	if (!max)
		goto max;
bounds:
	xas->xa_node = XAS_BOUNDS;
	return NULL;
max:
	xas->xa_node = XAS_RESTART;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);

/**
 * xa_init_flags() - Initialise an empty XArray with flags.
 * @xa: XArray.
 * @flags: XA_FLAG values.
 *
 * If you need to initialise an XArray with special flags (eg you need
 * to take the lock from interrupt context), use this function instead
 * of xa_init().
 *
 * Context: Any context.
 */
void xa_init_flags(struct xarray *xa, gfp_t flags)
{
	unsigned int lock_type;
	static struct lock_class_key xa_lock_irq;
	static struct lock_class_key xa_lock_bh;

	spin_lock_init(&xa->xa_lock);
	xa->xa_flags = flags;
	xa->xa_head = NULL;

	lock_type = xa_lock_type(xa);
	if (lock_type == XA_LOCK_IRQ)
		lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
	else if (lock_type == XA_LOCK_BH)
		lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
}
EXPORT_SYMBOL(xa_init_flags);
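
/*
 * Illustrative sketch (an assumption, not from this file): initialising
 * an XArray whose lock is taken from interrupt context, then storing
 * into it under the irq-safe lock. "irq_objects", "index" and "obj" are
 * hypothetical names.
 *
 *	static struct xarray irq_objects;
 *
 *	xa_init_flags(&irq_objects, XA_FLAGS_LOCK_IRQ);
 *
 *	xa_lock_irq(&irq_objects);
 *	__xa_store(&irq_objects, index, obj, GFP_ATOMIC);
 *	xa_unlock_irq(&irq_objects);
 */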

/**
 * xa_load() - Load an entry from an XArray.
 * @xa: XArray.
 * @index: index into array.
 *
 * Context: Any context. Takes and releases the RCU lock.
 * Return: The entry at @index in @xa.
 */
void *xa_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}
EXPORT_SYMBOL(xa_load);

static void *xas_result(struct xa_state *xas, void *curr)
{
	XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
	if (xas_error(xas))
		curr = xas->xa_node;
	return curr;
}

/**
 * __xa_erase() - Erase this entry from the XArray while locked.
 * @xa: XArray.
 * @index: Index into array.
 *
 * If the entry at this index is a multi-index entry then all indices will
 * be erased, and the entry will no longer be a multi-index entry.
 *
 * Context: Any context. Expects xa_lock to be held on entry.
 * Return: The old entry at this index.
 */
void *__xa_erase(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL_GPL(__xa_erase);

/**
 * xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
 * Storing into an existing multislot entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Process context. Takes and releases the xa_lock. May sleep
 * if the @gfp flags permit.
 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
 * failed.
 */
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(xa_store);
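
/*
 * Illustrative sketch (an assumption): basic normal-API usage of
 * xa_store() with error checking via xa_err(). "cache" and "item" are
 * hypothetical names.
 *
 *	void *old;
 *
 *	old = xa_store(&cache, index, item, GFP_KERNEL);
 *	if (xa_is_err(old))
 *		return xa_err(old);	(-ENOMEM or -EINVAL)
 *
 *	Later, reads need no locking by the caller:
 *	item = xa_load(&cache, index);
 */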

/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_store(&xas, entry);
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);

/**
 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * If the entry at @index is the same as @old, replace it with @entry.
 * If the return value is equal to @old, then the exchange was successful.
 *
 * Context: Process context. Takes and releases the xa_lock. May sleep
 * if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
void *xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		curr = xas_load(&xas);
		if (curr == old)
			xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(xa_cmpxchg);
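
/*
 * Illustrative sketch (an assumption): using xa_cmpxchg() to insert an
 * entry only if the slot is currently empty, as described above.
 * "registry", "id" and "new_obj" are hypothetical names.
 *
 *	void *curr;
 *
 *	curr = xa_cmpxchg(&registry, id, NULL, new_obj, GFP_KERNEL);
 *	if (xa_is_err(curr))
 *		return xa_err(curr);
 *	if (curr != NULL)
 *		return -EBUSY;		(someone else got there first)
 *	return 0;
 */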

/**
 * __xa_cmpxchg() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_load(&xas);
		if (curr == old)
			xas_store(&xas, entry);
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);

/**
 * __xa_set_mark() - Set this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a NULL entry does not succeed.
 *
 * Context: Any context. Expects xa_lock to be held on entry.
 */
void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry = xas_load(&xas);

	if (entry)
		xas_set_mark(&xas, mark);
}
EXPORT_SYMBOL_GPL(__xa_set_mark);

/**
 * __xa_clear_mark() - Clear this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Context: Any context. Expects xa_lock to be held on entry.
 */
void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry = xas_load(&xas);

	if (entry)
		xas_clear_mark(&xas, mark);
}
EXPORT_SYMBOL_GPL(__xa_clear_mark);

/**
 * xa_get_mark() - Inquire whether this mark is set on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * This function uses the RCU read lock, so the result may be out of date
 * by the time it returns. If you need the result to be stable, use a lock.
 *
 * Context: Any context. Takes and releases the RCU lock.
 * Return: True if the entry at @index has this mark set, false if it doesn't.
 */
bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	entry = xas_start(&xas);
	while (xas_get_mark(&xas, mark)) {
		if (!xa_is_node(entry))
			goto found;
		entry = xas_descend(&xas, xa_to_node(entry));
	}
	rcu_read_unlock();
	return false;
 found:
	rcu_read_unlock();
	return true;
}
EXPORT_SYMBOL(xa_get_mark);

/**
 * xa_set_mark() - Set this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a NULL entry does not succeed.
 *
 * Context: Process context. Takes and releases the xa_lock.
 */
void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_set_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_set_mark);

/**
 * xa_clear_mark() - Clear this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Clearing a mark always succeeds.
 *
 * Context: Process context. Takes and releases the xa_lock.
 */
void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_clear_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_clear_mark);
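
/*
 * Illustrative sketch (an assumption): tagging an entry with a mark and
 * checking it again later. "dirty_pages", "page" and "writeback()" are
 * hypothetical; the mark is one of XA_MARK_0, XA_MARK_1 or XA_MARK_2.
 *
 *	xa_store(&dirty_pages, index, page, GFP_KERNEL);
 *	xa_set_mark(&dirty_pages, index, XA_MARK_0);
 *
 *	if (xa_get_mark(&dirty_pages, index, XA_MARK_0))
 *		writeback(page);
 *
 *	xa_clear_mark(&dirty_pages, index, XA_MARK_0);
 */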

/**
 * xa_find() - Search the XArray for an entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter, and has the lowest
 * index that is at least @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may not find
 * entries which are being simultaneously added. It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context. Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find(struct xarray *xa, unsigned long *indexp,
			unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find);

/**
 * xa_find_after() - Search the XArray for a present entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter and has the lowest
 * index that is above @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may miss entries
 * which are being simultaneously added. It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context. Takes and releases the RCU lock.
 * Return: The pointer, if found, otherwise %NULL.
 */
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
			unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp + 1);
	void *entry;

	rcu_read_lock();
	for (;;) {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);
		if (xas.xa_shift) {
			if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
				continue;
		} else {
			if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
				continue;
		}
		if (!xas_retry(&xas, entry))
			break;
	}
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find_after);
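
/*
 * Illustrative sketch (an assumption): walking every present entry, and
 * then only the entries carrying XA_MARK_0, using the two functions
 * above. "cache" and "handle()" are hypothetical; XA_PRESENT is the
 * filter meaning "any entry".
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	entry = xa_find(&cache, &index, ULONG_MAX, XA_PRESENT);
 *	while (entry) {
 *		handle(index, entry);
 *		entry = xa_find_after(&cache, &index, ULONG_MAX, XA_PRESENT);
 *	}
 *
 *	index = 0;
 *	entry = xa_find(&cache, &index, ULONG_MAX, XA_MARK_0);
 */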

#ifdef XA_DEBUG
void xa_dump_node(const struct xa_node *node)
{
	unsigned i, j;

	if (!node)
		return;
	if ((unsigned long)node & 3) {
		pr_cont("node %px\n", node);
		return;
	}

	pr_cont("node %px %s %d parent %px shift %d count %d values %d "
		"array %px list %px %px marks",
		node, node->parent ? "offset" : "max", node->offset,
		node->parent, node->shift, node->count, node->nr_values,
		node->array, node->private_list.prev, node->private_list.next);
	for (i = 0; i < XA_MAX_MARKS; i++)
		for (j = 0; j < XA_MARK_LONGS; j++)
			pr_cont(" %lx", node->marks[i][j]);
	pr_cont("\n");
}

void xa_dump_index(unsigned long index, unsigned int shift)
{
	if (!shift)
		pr_info("%lu: ", index);
	else if (shift >= BITS_PER_LONG)
		pr_info("0-%lu: ", ~0UL);
	else
		pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
}

void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
{
	if (!entry)
		return;

	xa_dump_index(index, shift);

	if (xa_is_node(entry)) {
		if (shift == 0) {
			pr_cont("%px\n", entry);
		} else {
			unsigned long i;
			struct xa_node *node = xa_to_node(entry);
			xa_dump_node(node);
			for (i = 0; i < XA_CHUNK_SIZE; i++)
				xa_dump_entry(node->slots[i],
				      index + (i << node->shift), node->shift);
		}
	} else if (xa_is_value(entry))
		pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
						xa_to_value(entry), entry);
	else if (!xa_is_internal(entry))
		pr_cont("%px\n", entry);
	else if (xa_is_retry(entry))
		pr_cont("retry (%ld)\n", xa_to_internal(entry));
	else if (xa_is_sibling(entry))
		pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
	else
		pr_cont("UNKNOWN ENTRY (%px)\n", entry);
}

void xa_dump(const struct xarray *xa)
{
	void *entry = xa->xa_head;
	unsigned int shift = 0;

	pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
			xa->xa_flags, xa_marked(xa, XA_MARK_0),
			xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
	if (xa_is_node(entry))
		shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
	xa_dump_entry(entry, 0, shift);
}
#endif