// SPDX-License-Identifier: GPL-2.0+
/*
 * XArray implementation
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/*
 * Coding conventions in this file:
 *
 * @xa is used to refer to the entire xarray.
 * @xas is the 'xarray operation state'.  It may be either a pointer to
 * an xa_state, or an xa_state stored on the stack.  This is an unfortunate
 * ambiguity.
 * @index is the index of the entry being operated on
 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
 * @node refers to an xa_node; usually the primary one being operated on by
 * this function.
 * @offset is the index into the slots array inside an xa_node.
 * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray
 */
static inline unsigned int xa_lock_type(const struct xarray *xa)
{
	return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_lock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_lock_bh(xas);
	else
		xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_unlock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_unlock_bh(xas);
	else
		xas_unlock(xas);
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
		unsigned int offset, xa_mark_t mark)
{
	return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)
/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set.  Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}
/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
	return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}

static void xas_advance(struct xa_state *xas)
{
	xas->xa_offset++;
	xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}
/*
 * Starts a walk.  If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to.  If we're in an
 * error state, return NULL.  If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}
static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	if (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
	}

	xas->xa_offset = offset;
	return entry;
}
/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index.  However, it will do nothing and return %NULL if
 * @xas is in an error state.  xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
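
/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * advanced-API lookup under RCU.  'my_xa' and 'my_index' are hypothetical
 * caller-side names.
 *
 *	XA_STATE(xas, &my_xa, my_index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	do {
 *		entry = xas_load(&xas);
 *	} while (xas_retry(&xas, entry));
 *	rcu_read_unlock();
 *
 * This is essentially what xa_load() further down does for you.
 */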

/* Move the radix tree node cache here */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

#define XA_RCU_FREE	((struct xarray *)1)

static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * This function is now internal-only.
 */
static void xas_destroy(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_alloc;

	if (!node)
		return;
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	kmem_cache_free(radix_tree_node_cachep, node);
	xas->xa_alloc = NULL;
}
/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue.  The caller
 * should drop the lock and call xas_nomem().  If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc().  More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
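
/*
 * Usage sketch (illustrative only): the drop-lock-and-retry pattern the
 * comment above describes.  'my_xa', 'my_index' and 'my_ptr' are
 * hypothetical caller-side names.
 *
 *	XA_STATE(xas, &my_xa, my_index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, my_ptr);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 *
 * On -ENOMEM, xas_nomem() allocates a node outside the lock and returns
 * true, so the loop re-runs the store; xa_store() below is built exactly
 * this way.
 */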

/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (gfpflags_allow_blocking(gfp)) {
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
	if (xas->xa_update)
		xas->xa_update(node);
	else
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		xas->xa_alloc = NULL;
	} else {
		node = kmem_cache_alloc(radix_tree_node_cachep,
					GFP_NOWAIT | __GFP_NOWARN);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas.  Because we cannot store a
 * multiple-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask;
		mask = (((xas->xa_sibs + 1UL) << xas->xa_shift) - 1);
		max |= mask;
		if (mask == max)
			max++;
	}
#endif

	return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
	if (!xa_is_node(entry))
		return 0;
	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		if (!xa_is_node(entry) && node->shift)
			break;

		xas->xa_node = XAS_BOUNDS;
		RCU_INIT_POINTER(xa->xa_head, entry);

		node->count = 0;
		node->nr_values = 0;
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}

/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if xa->node has
 * a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	if (!node->parent)
		xas_shrink(xas);
}

/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree.  We must now free it and all
 * of its subnodes.  There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (xa_is_node(entry)) {
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_marked(xa, mark))
				node_set_mark(node, 0, mark);
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store().  It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns NULL.  If it failed to create the
 * slot, returns NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}

static void update_node(struct xa_state *xas, struct xa_node *node,
		int count, int values)
{
	if (!node || (!count && !values))
		return;

	node->count += count;
	node->nr_values += values;
	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
	xas_update(xas, node);
	if (count < 0)
		xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range).  This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;
	int values = 0;
	void *first, *next;
	bool value = xa_is_value(entry);

	if (entry)
		first = xas_create(xas);
	else
		first = xas_load(xas);

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early.  rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);

/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Return: true if the mark is set, false if the mark is clear or @xas
 * is in an error state.
 */
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
	if (xas_invalid(xas))
		return false;
	if (!xas->xa_node)
		return xa_marked(xas->xa, mark);
	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);

/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries.  Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (node_set_mark(node, offset, mark))
			return;
		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (!xa_marked(xas->xa, mark))
		xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);

/**
 * xas_clear_mark() - Clears the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Clears the specified mark on this entry, and walks back to the head
 * attempting to clear it on all the ancestor entries.  Does nothing if
 * @xas has not been walked to an entry, or is in an error state.
 */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (!node_clear_mark(node, offset, mark))
			return;
		if (node_any_mark(node, mark))
			return;

		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (xa_marked(xas->xa, mark))
		xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);

/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas.  If we're tracking
 * free entries with a mark, we need to set it on all entries.  All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
	xa_mark_t mark = 0;

	for (;;) {
		xas_clear_mark(xas, mark);
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}
EXPORT_SYMBOL_GPL(xas_init_marks);

/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry.  Those users should call this function before they drop
 * the lock.  It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.  If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration.  If a user needs
 * to pause a reverse iteration, we will need a xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (xas_invalid(xas))
		return;

	if (node) {
		unsigned int offset = xas->xa_offset;
		while (++offset < XA_CHUNK_SIZE) {
			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
				break;
		}
		xas->xa_index += (offset - xas->xa_offset) << node->shift;
	} else {
		xas->xa_index++;
	}
	xas->xa_node = XAS_RESTART;
}
EXPORT_SYMBOL_GPL(xas_pause);
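
/*
 * Usage sketch (illustrative only): pausing an iteration so the lock can
 * be dropped.  'my_xa' and 'process()' are hypothetical caller-side names.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	xas_lock(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		if (need_resched()) {
 *			xas_pause(&xas);	// before dropping the lock
 *			xas_unlock(&xas);
 *			cond_resched();
 *			xas_lock(&xas);
 *		}
 *		process(entry);
 *	}
 *	xas_unlock(&xas);
 */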

/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index--;
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset--;
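
	/* xa_offset is an unsigned char, so decrementing 0 wraps to 255 */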
	while (xas->xa_offset == 255) {
		xas->xa_offset = xas->xa_node->offset - 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_prev);

/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index++;
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset++;

	while (xas->xa_offset == XA_CHUNK_SIZE) {
		xas->xa_offset = xas->xa_node->offset + 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_next);

/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array.  This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		return set_bounds(xas);
	} else if (xas_top(xas->xa_node)) {
		entry = xas_load(xas);
		if (entry || xas_not_node(xas->xa_node))
			return entry;
	} else if (!xas->xa_node->shift &&
		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
	}

	xas_advance(xas);

	while (xas->xa_node && (xas->xa_index <= max)) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (xa_is_node(entry)) {
			xas->xa_node = xa_to_node(entry);
			xas->xa_offset = 0;
			continue;
		}
		if (entry && !xa_is_sibling(entry))
			return entry;

		xas_advance(xas);
	}

	if (!xas->xa_node)
		xas->xa_node = XAS_BOUNDS;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
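
/*
 * Usage sketch (illustrative only): xas_find() is the engine behind the
 * xas_for_each() iterator, so scanning the first 64 indices looks like
 * this.  'my_xa' is a hypothetical caller-side name.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	xas_for_each(&xas, entry, 63) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		pr_info("index %lu: %px\n", xas.xa_index, entry);
 *	}
 *	rcu_read_unlock();
 */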

/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array.  This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
	bool advance = true;
	unsigned int offset;
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		goto out;
	} else if (xas_top(xas->xa_node)) {
		advance = false;
		entry = xa_head(xas->xa);
		xas->xa_node = NULL;
		if (xas->xa_index > max_index(entry))
			goto bounds;
		if (!xa_is_node(entry)) {
			if (xa_marked(xas->xa, mark))
				return entry;
			xas->xa_index = 1;
			goto out;
		}
		xas->xa_node = xa_to_node(entry);
		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
	}

	while (xas->xa_index <= max) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			advance = false;
			continue;
		}

		if (!advance) {
			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
			if (xa_is_sibling(entry)) {
				xas->xa_offset = xa_to_sibling(entry);
				xas_move_index(xas, xas->xa_offset);
			}
		}

		offset = xas_find_chunk(xas, advance, mark);
		if (offset > xas->xa_offset) {
			advance = false;
			xas_move_index(xas, offset);
			/* Mind the wrap */
			if ((xas->xa_index - 1) >= max)
				goto max;
			xas->xa_offset = offset;
			if (offset == XA_CHUNK_SIZE)
				continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}

out:
	if (!max)
		goto max;
bounds:
	xas->xa_node = XAS_BOUNDS;
	return NULL;
max:
	xas->xa_node = XAS_RESTART;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);

/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context.  Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
	void *curr;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node)
		return NULL;

	if (xas_top(xas->xa_node)) {
		curr = xas_start(xas);
		if (!curr)
			return NULL;
		while (xa_is_node(curr)) {
			struct xa_node *node = xa_to_node(curr);
			curr = xas_descend(xas, node);
		}
		if (curr)
			return curr;
	}

	if (xas->xa_node->shift > xas->xa_shift)
		return NULL;

	for (;;) {
		if (xas->xa_node->shift == xas->xa_shift) {
			if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
				break;
		} else if (xas->xa_offset == XA_CHUNK_MASK) {
			xas->xa_offset = xas->xa_node->offset;
			xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			continue;
		}
		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
		if (xa_is_sibling(curr))
			continue;
		while (xa_is_node(curr)) {
			xas->xa_node = xa_to_node(curr);
			xas->xa_offset = 0;
			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
		}
		if (curr)
			return curr;
	}
	xas->xa_offset -= xas->xa_sibs;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);

/**
 * xa_init_flags() - Initialise an empty XArray with flags.
 * @xa: XArray.
 * @flags: XA_FLAG values.
 *
 * If you need to initialise an XArray with special flags (eg you need
 * to take the lock from interrupt context), use this function instead
 * of xa_init().
 *
 * Context: Any context.
 */
void xa_init_flags(struct xarray *xa, gfp_t flags)
{
	unsigned int lock_type;
	static struct lock_class_key xa_lock_irq;
	static struct lock_class_key xa_lock_bh;

	spin_lock_init(&xa->xa_lock);
	xa->xa_flags = flags;
	xa->xa_head = NULL;

	lock_type = xa_lock_type(xa);
	if (lock_type == XA_LOCK_IRQ)
		lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
	else if (lock_type == XA_LOCK_BH)
		lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
}
EXPORT_SYMBOL(xa_init_flags);
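
/*
 * Usage sketch (illustrative only): an XArray whose lock is taken from
 * interrupt context must be initialised with XA_FLAGS_LOCK_IRQ so that
 * the matching lockdep class and xa_lock_irq*() variants are used.
 * 'my_irq_array' is a hypothetical name.
 *
 *	static struct xarray my_irq_array;
 *
 *	xa_init_flags(&my_irq_array, XA_FLAGS_LOCK_IRQ);
 */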

/**
 * xa_load() - Load an entry from an XArray.
 * @xa: XArray.
 * @index: index into array.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry at @index in @xa.
 */
void *xa_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}
EXPORT_SYMBOL(xa_load);

static void *xas_result(struct xa_state *xas, void *curr)
{
	XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
	if (xas_error(xas))
		curr = xas->xa_node;
	return curr;
}

/**
 * __xa_erase() - Erase this entry from the XArray while locked.
 * @xa: XArray.
 * @index: Index into array.
 *
 * If the entry at this index is a multi-index entry then all indices will
 * be erased, and the entry will no longer be a multi-index entry.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 * Return: The old entry at this index.
 */
void *__xa_erase(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL_GPL(__xa_erase);

/**
 * xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
 * Storing into an existing multislot entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Process context.  Takes and releases the xa_lock.  May sleep
 * if the @gfp flags permit.
 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
 * failed.
 */
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(xa_store);
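
/*
 * Usage sketch (illustrative only): a simple store/load round trip.
 * 'my_xa' and 'my_object' are hypothetical caller-side names.
 *
 *	DEFINE_XARRAY(my_xa);
 *
 *	void *old = xa_store(&my_xa, 5, my_object, GFP_KERNEL);
 *	if (xa_is_err(old))
 *		return xa_err(old);
 *	WARN_ON(xa_load(&my_xa, 5) != my_object);
 *	xa_store(&my_xa, 5, NULL, GFP_KERNEL);	// storing NULL erases
 */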

/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_store(&xas, entry);
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);

/**
 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * If the entry at @index is the same as @old, replace it with @entry.
 * If the return value is equal to @old, then the exchange was successful.
 *
 * Context: Process context.  Takes and releases the xa_lock.  May sleep
 * if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
void *xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		curr = xas_load(&xas);
		if (curr == old)
			xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(xa_cmpxchg);
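
/*
 * Usage sketch (illustrative only): insert-if-absent built on
 * xa_cmpxchg().  'my_xa', 'my_index' and 'my_ptr' are hypothetical
 * caller-side names.
 *
 *	void *curr = xa_cmpxchg(&my_xa, my_index, NULL, my_ptr, GFP_KERNEL);
 *	if (xa_is_err(curr))
 *		return xa_err(curr);	// allocation failed
 *	if (curr)
 *		return -EBUSY;		// someone else got there first
 *	// curr == NULL: our entry is now stored
 */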

/**
 * __xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_load(&xas);
		if (curr == old)
			xas_store(&xas, entry);
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);

/**
 * __xa_set_mark() - Set this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a NULL entry does not succeed.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 */
void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry = xas_load(&xas);

	if (entry)
		xas_set_mark(&xas, mark);
}
EXPORT_SYMBOL_GPL(__xa_set_mark);

/**
 * __xa_clear_mark() - Clear this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 */
void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry = xas_load(&xas);

	if (entry)
		xas_clear_mark(&xas, mark);
}
EXPORT_SYMBOL_GPL(__xa_clear_mark);

/**
 * xa_get_mark() - Inquire whether this mark is set on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * This function uses the RCU read lock, so the result may be out of date
 * by the time it returns.  If you need the result to be stable, use a lock.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: True if the entry at @index has this mark set, false if it doesn't.
 */
bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	entry = xas_start(&xas);
	while (xas_get_mark(&xas, mark)) {
		if (!xa_is_node(entry))
			goto found;
		entry = xas_descend(&xas, xa_to_node(entry));
	}
	rcu_read_unlock();
	return false;
found:
	rcu_read_unlock();
	return true;
}
EXPORT_SYMBOL(xa_get_mark);

/**
 * xa_set_mark() - Set this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a NULL entry does not succeed.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_set_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_set_mark);

/**
 * xa_clear_mark() - Clear this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Clearing a mark always succeeds.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_clear_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_clear_mark);
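
/*
 * Usage sketch (illustrative only): using a mark as a cheap "dirty" flag
 * on stored entries.  'my_xa', 'my_object' and 'writeback()' are
 * hypothetical caller-side names; XA_MARK_0 is arbitrarily chosen.
 *
 *	xa_store(&my_xa, 7, my_object, GFP_KERNEL);
 *	xa_set_mark(&my_xa, 7, XA_MARK_0);
 *	...
 *	if (xa_get_mark(&my_xa, 7, XA_MARK_0)) {
 *		writeback(my_object);
 *		xa_clear_mark(&my_xa, 7, XA_MARK_0);
 *	}
 */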

/**
 * xa_find() - Search the XArray for an entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter, and has the lowest
 * index that is at least @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may not find
 * entries which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find(struct xarray *xa, unsigned long *indexp,
			unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find);
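
/*
 * Usage sketch (illustrative only): walking every present entry with
 * xa_find()/xa_find_after().  'my_xa' and 'process()' are hypothetical
 * caller-side names.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	entry = xa_find(&my_xa, &index, ULONG_MAX, XA_PRESENT);
 *	while (entry) {
 *		process(entry);
 *		entry = xa_find_after(&my_xa, &index, ULONG_MAX, XA_PRESENT);
 *	}
 */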

/**
 * xa_find_after() - Search the XArray for a present entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter and has the lowest
 * index that is above @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may miss entries
 * which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The pointer, if found, otherwise %NULL.
 */
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
			unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp + 1);
	void *entry;

	rcu_read_lock();
	for (;;) {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);
		if (xas.xa_shift) {
			if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
				continue;
		} else {
			if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
				continue;
		}
		if (!xas_retry(&xas, entry))
			break;
	}
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find_after);

static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
			unsigned long max, unsigned int n)
{
	void *entry;
	unsigned int i = 0;

	rcu_read_lock();
	xas_for_each(xas, entry, max) {
		if (xas_retry(xas, entry))
			continue;
		dst[i++] = entry;
		if (i == n)
			break;
	}
	rcu_read_unlock();

	return i;
}

static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
			unsigned long max, unsigned int n, xa_mark_t mark)
{
	void *entry;
	unsigned int i = 0;

	rcu_read_lock();
	xas_for_each_marked(xas, entry, max, mark) {
		if (xas_retry(xas, entry))
			continue;
		dst[i++] = entry;
		if (i == n)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * xa_extract() - Copy selected entries from the XArray into a normal array.
 * @xa: The source XArray to copy from.
 * @dst: The buffer to copy entries into.
 * @start: The first index in the XArray eligible to be selected.
 * @max: The last index in the XArray eligible to be selected.
 * @n: The maximum number of entries to copy.
 * @filter: Selection criterion.
 *
 * Copies up to @n entries that match @filter from the XArray.  The
 * copied entries will have indices between @start and @max, inclusive.
 *
 * The @filter may be an XArray mark value, in which case entries which are
 * marked with that mark will be copied.  It may also be %XA_PRESENT, in
 * which case all entries which are not NULL will be copied.
 *
 * The entries returned may not represent a snapshot of the XArray at a
 * moment in time.  For example, if another thread stores to index 5, then
 * index 10, calling xa_extract() may return the old contents of index 5
 * and the new contents of index 10.  Indices not modified while this
 * function is running will not be skipped.
 *
 * If you need stronger guarantees, holding the xa_lock across calls to this
 * function will prevent concurrent modification.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The number of entries copied.
 */
unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
			unsigned long max, unsigned int n, xa_mark_t filter)
{
	XA_STATE(xas, xa, start);

	if (!n)
		return 0;

	if ((__force unsigned int)filter < XA_MAX_MARKS)
		return xas_extract_marked(&xas, dst, max, n, filter);
	return xas_extract_present(&xas, dst, max, n);
}
EXPORT_SYMBOL(xa_extract);
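
/*
 * Usage sketch (illustrative only): batching up to 16 marked entries
 * into a plain array.  'my_xa' is a hypothetical caller-side name and
 * XA_MARK_0 is arbitrarily chosen.
 *
 *	void *batch[16];
 *	unsigned int count;
 *
 *	count = xa_extract(&my_xa, batch, 0, ULONG_MAX, 16, XA_MARK_0);
 *	// batch[0..count-1] now hold the first (up to) 16 marked entries
 */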

/**
 * xa_destroy() - Free all internal data structures.
 * @xa: XArray.
 *
 * After calling this function, the XArray is empty and has freed all memory
 * allocated for its internal data structures.  You are responsible for
 * freeing the objects referenced by the XArray.
 *
 * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
 */
void xa_destroy(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long flags;
	void *entry;

	xas.xa_node = NULL;
	xas_lock_irqsave(&xas, flags);
	entry = xa_head_locked(xa);
	RCU_INIT_POINTER(xa->xa_head, NULL);
	xas_init_marks(&xas);
	/* lockdep checks we're still holding the lock in xas_free_nodes() */
	if (xa_is_node(entry))
		xas_free_nodes(&xas, xa_to_node(entry));
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(xa_destroy);

#ifdef XA_DEBUG
void xa_dump_node(const struct xa_node *node)
{
	unsigned i, j;

	if (!node)
		return;
	if ((unsigned long)node & 3) {
		pr_cont("node %px\n", node);
		return;
	}

	pr_cont("node %px %s %d parent %px shift %d count %d values %d "
		"array %px list %px %px marks",
		node, node->parent ? "offset" : "max", node->offset,
		node->parent, node->shift, node->count, node->nr_values,
		node->array, node->private_list.prev, node->private_list.next);
	for (i = 0; i < XA_MAX_MARKS; i++)
		for (j = 0; j < XA_MARK_LONGS; j++)
			pr_cont(" %lx", node->marks[i][j]);
	pr_cont("\n");
}

void xa_dump_index(unsigned long index, unsigned int shift)
{
	if (!shift)
		pr_info("%lu: ", index);
	else if (shift >= BITS_PER_LONG)
		pr_info("0-%lu: ", ~0UL);
	else
		pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
}

void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
{
	if (!entry)
		return;

	xa_dump_index(index, shift);

	if (xa_is_node(entry)) {
		if (shift == 0) {
			pr_cont("%px\n", entry);
		} else {
			unsigned long i;
			struct xa_node *node = xa_to_node(entry);
			xa_dump_node(node);
			for (i = 0; i < XA_CHUNK_SIZE; i++)
				xa_dump_entry(node->slots[i],
				      index + (i << node->shift), node->shift);
		}
	} else if (xa_is_value(entry))
		pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
						xa_to_value(entry), entry);
	else if (!xa_is_internal(entry))
		pr_cont("%px\n", entry);
	else if (xa_is_retry(entry))
		pr_cont("retry (%ld)\n", xa_to_internal(entry));
	else if (xa_is_sibling(entry))
		pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
	else
		pr_cont("UNKNOWN ENTRY (%px)\n", entry);
}

void xa_dump(const struct xarray *xa)
{
	void *entry = xa->xa_head;
	unsigned int shift = 0;

	pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
			xa->xa_flags, xa_marked(xa, XA_MARK_0),
			xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
	if (xa_is_node(entry))
		shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
	xa_dump_entry(entry, 0, shift);
}
#endif