  1. /* SPDX-License-Identifier: GPL-2.0+ */
  2. #ifndef _LINUX_XARRAY_H
  3. #define _LINUX_XARRAY_H
  4. /*
  5. * eXtensible Arrays
  6. * Copyright (c) 2017 Microsoft Corporation
  7. * Author: Matthew Wilcox <willy@infradead.org>
  8. *
  9. * See Documentation/core-api/xarray.rst for how to use the XArray.
  10. */
  11. #include <linux/bug.h>
  12. #include <linux/compiler.h>
  13. #include <linux/gfp.h>
  14. #include <linux/kconfig.h>
  15. #include <linux/kernel.h>
  16. #include <linux/rcupdate.h>
  17. #include <linux/spinlock.h>
  18. #include <linux/types.h>
  19. /*
  20. * The bottom two bits of the entry determine how the XArray interprets
  21. * the contents:
  22. *
  23. * 00: Pointer entry
  24. * 10: Internal entry
  25. * x1: Value entry or tagged pointer
  26. *
  27. * Attempting to store internal entries in the XArray is a bug.
  28. *
  29. * Most internal entries are pointers to the next node in the tree.
  30. * The following internal entries have a special meaning:
  31. *
  32. * 0-62: Sibling entries
  33. * 256: Retry entry
  34. *
  35. * Errors are also represented as internal entries, but use the negative
  36. * space (-4094 to -2). They're never stored in the slots array; only
  37. * returned by the normal API.
  38. */
  39. #define BITS_PER_XA_VALUE (BITS_PER_LONG - 1)
  40. /**
  41. * xa_mk_value() - Create an XArray entry from an integer.
  42. * @v: Value to store in XArray.
  43. *
  44. * Context: Any context.
  45. * Return: An entry suitable for storing in the XArray.
  46. */
  47. static inline void *xa_mk_value(unsigned long v)
  48. {
  49. WARN_ON((long)v < 0);
  50. return (void *)((v << 1) | 1);
  51. }
  52. /**
  53. * xa_to_value() - Get value stored in an XArray entry.
  54. * @entry: XArray entry.
  55. *
  56. * Context: Any context.
  57. * Return: The value stored in the XArray entry.
  58. */
  59. static inline unsigned long xa_to_value(const void *entry)
  60. {
  61. return (unsigned long)entry >> 1;
  62. }
  63. /**
  64. * xa_is_value() - Determine if an entry is a value.
  65. * @entry: XArray entry.
  66. *
  67. * Context: Any context.
  68. * Return: True if the entry is a value, false if it is a pointer.
  69. */
  70. static inline bool xa_is_value(const void *entry)
  71. {
  72. return (unsigned long)entry & 1;
  73. }
  74. /**
  75. * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
  76. * @p: Plain pointer.
  77. * @tag: Tag value (0, 1 or 3).
  78. *
  79. * If the user of the XArray prefers, they can tag their pointers instead
  80. * of storing value entries. Three tags are available (0, 1 and 3).
  81. * These are distinct from the xa_mark_t as they are not replicated up
  82. * through the array and cannot be searched for.
  83. *
  84. * Context: Any context.
  85. * Return: An XArray entry.
  86. */
  87. static inline void *xa_tag_pointer(void *p, unsigned long tag)
  88. {
  89. return (void *)((unsigned long)p | tag);
  90. }
  91. /**
  92. * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
  93. * @entry: XArray entry.
  94. *
  95. * If you have stored a tagged pointer in the XArray, call this function
  96. * to get the untagged version of the pointer.
  97. *
  98. * Context: Any context.
  99. * Return: A pointer.
  100. */
  101. static inline void *xa_untag_pointer(void *entry)
  102. {
  103. return (void *)((unsigned long)entry & ~3UL);
  104. }
  105. /**
  106. * xa_pointer_tag() - Get the tag stored in an XArray entry.
  107. * @entry: XArray entry.
  108. *
  109. * If you have stored a tagged pointer in the XArray, call this function
  110. * to get the tag of that pointer.
  111. *
  112. * Context: Any context.
  113. * Return: A tag.
  114. */
  115. static inline unsigned int xa_pointer_tag(void *entry)
  116. {
  117. return (unsigned long)entry & 3UL;
  118. }
  119. /*
  120. * xa_mk_internal() - Create an internal entry.
  121. * @v: Value to turn into an internal entry.
  122. *
  123. * Context: Any context.
  124. * Return: An XArray internal entry corresponding to this value.
  125. */
  126. static inline void *xa_mk_internal(unsigned long v)
  127. {
  128. return (void *)((v << 2) | 2);
  129. }
  130. /*
  131. * xa_to_internal() - Extract the value from an internal entry.
  132. * @entry: XArray entry.
  133. *
  134. * Context: Any context.
  135. * Return: The value which was stored in the internal entry.
  136. */
  137. static inline unsigned long xa_to_internal(const void *entry)
  138. {
  139. return (unsigned long)entry >> 2;
  140. }
  141. /*
  142. * xa_is_internal() - Is the entry an internal entry?
  143. * @entry: XArray entry.
  144. *
  145. * Context: Any context.
  146. * Return: %true if the entry is an internal entry.
  147. */
  148. static inline bool xa_is_internal(const void *entry)
  149. {
  150. return ((unsigned long)entry & 3) == 2;
  151. }
  152. /**
  153. * xa_is_err() - Report whether an XArray operation returned an error
  154. * @entry: Result from calling an XArray function
  155. *
  156. * If an XArray operation cannot complete an operation, it will return
  157. * a special value indicating an error. This function tells you
  158. * whether an error occurred; xa_err() tells you which error occurred.
  159. *
  160. * Context: Any context.
  161. * Return: %true if the entry indicates an error.
  162. */
  163. static inline bool xa_is_err(const void *entry)
  164. {
  165. return unlikely(xa_is_internal(entry));
  166. }
  167. /**
  168. * xa_err() - Turn an XArray result into an errno.
  169. * @entry: Result from calling an XArray function.
  170. *
  171. * If an XArray operation cannot complete an operation, it will return
  172. * a special pointer value which encodes an errno. This function extracts
  173. * the errno from the pointer value, or returns 0 if the pointer does not
  174. * represent an errno.
  175. *
  176. * Context: Any context.
  177. * Return: A negative errno or 0.
  178. */
  179. static inline int xa_err(void *entry)
  180. {
  181. /* xa_to_internal() would not do sign extension. */
  182. if (xa_is_err(entry))
  183. return (long)entry >> 2;
  184. return 0;
  185. }
  186. typedef unsigned __bitwise xa_mark_t;
  187. #define XA_MARK_0 ((__force xa_mark_t)0U)
  188. #define XA_MARK_1 ((__force xa_mark_t)1U)
  189. #define XA_MARK_2 ((__force xa_mark_t)2U)
  190. #define XA_PRESENT ((__force xa_mark_t)8U)
  191. #define XA_MARK_MAX XA_MARK_2
  192. enum xa_lock_type {
  193. XA_LOCK_IRQ = 1,
  194. XA_LOCK_BH = 2,
  195. };
  196. /*
  197. * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags,
  198. * and we remain compatible with that.
  199. */
  200. #define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ)
  201. #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH)
  202. #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
  203. (__force unsigned)(mark)))
  204. /**
  205. * struct xarray - The anchor of the XArray.
  206. * @xa_lock: Lock that protects the contents of the XArray.
  207. *
  208. * To use the xarray, define it statically or embed it in your data structure.
  209. * It is a very small data structure, so it does not usually make sense to
  210. * allocate it separately and keep a pointer to it in your data structure.
  211. *
  212. * You may use the xa_lock to protect your own data structures as well.
  213. */
  214. /*
  215. * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
  216. * If the only non-NULL entry in the array is at index 0, @xa_head is that
  217. * entry. If any other entry in the array is non-NULL, @xa_head points
  218. * to an @xa_node.
  219. */
  220. struct xarray {
  221. spinlock_t xa_lock;
  222. /* private: The rest of the data structure is not to be used directly. */
  223. gfp_t xa_flags;
  224. void __rcu * xa_head;
  225. };
  226. #define XARRAY_INIT(name, flags) { \
  227. .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \
  228. .xa_flags = flags, \
  229. .xa_head = NULL, \
  230. }
  231. /**
  232. * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
  233. * @name: A string that names your XArray.
  234. * @flags: XA_FLAG values.
  235. *
  236. * This is intended for file scope definitions of XArrays. It declares
  237. * and initialises an empty XArray with the chosen name and flags. It is
  238. * equivalent to calling xa_init_flags() on the array, but it does the
  239. * initialisation at compiletime instead of runtime.
  240. */
  241. #define DEFINE_XARRAY_FLAGS(name, flags) \
  242. struct xarray name = XARRAY_INIT(name, flags)
  243. /**
  244. * DEFINE_XARRAY() - Define an XArray.
  245. * @name: A string that names your XArray.
  246. *
  247. * This is intended for file scope definitions of XArrays. It declares
  248. * and initialises an empty XArray with the chosen name. It is equivalent
  249. * to calling xa_init() on the array, but it does the initialisation at
  250. * compiletime instead of runtime.
  251. */
  252. #define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)
  253. void xa_init_flags(struct xarray *, gfp_t flags);
  254. void *xa_load(struct xarray *, unsigned long index);
  255. void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
  256. void *xa_cmpxchg(struct xarray *, unsigned long index,
  257. void *old, void *entry, gfp_t);
  258. bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
  259. void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
  260. void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
  261. void *xa_find(struct xarray *xa, unsigned long *index,
  262. unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
  263. void *xa_find_after(struct xarray *xa, unsigned long *index,
  264. unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
  265. /**
  266. * xa_init() - Initialise an empty XArray.
  267. * @xa: XArray.
  268. *
  269. * An empty XArray is full of NULL entries.
  270. *
  271. * Context: Any context.
  272. */
  273. static inline void xa_init(struct xarray *xa)
  274. {
  275. xa_init_flags(xa, 0);
  276. }
  277. /**
  278. * xa_empty() - Determine if an array has any present entries.
  279. * @xa: XArray.
  280. *
  281. * Context: Any context.
  282. * Return: %true if the array contains only NULL pointers.
  283. */
  284. static inline bool xa_empty(const struct xarray *xa)
  285. {
  286. return xa->xa_head == NULL;
  287. }
  288. /**
  289. * xa_marked() - Inquire whether any entry in this array has a mark set
  290. * @xa: Array
  291. * @mark: Mark value
  292. *
  293. * Context: Any context.
  294. * Return: %true if any entry has this mark set.
  295. */
  296. static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
  297. {
  298. return xa->xa_flags & XA_FLAGS_MARK(mark);
  299. }
  300. /**
  301. * xa_erase() - Erase this entry from the XArray.
  302. * @xa: XArray.
  303. * @index: Index of entry.
  304. *
  305. * This function is the equivalent of calling xa_store() with %NULL as
  306. * the third argument. The XArray does not need to allocate memory, so
  307. * the user does not need to provide GFP flags.
  308. *
  309. * Context: Process context. Takes and releases the xa_lock.
  310. * Return: The entry which used to be at this index.
  311. */
  312. static inline void *xa_erase(struct xarray *xa, unsigned long index)
  313. {
  314. return xa_store(xa, index, NULL, 0);
  315. }
  316. /**
  317. * xa_insert() - Store this entry in the XArray unless another entry is
  318. * already present.
  319. * @xa: XArray.
  320. * @index: Index into array.
  321. * @entry: New entry.
  322. * @gfp: Memory allocation flags.
  323. *
  324. * If you would rather see the existing entry in the array, use xa_cmpxchg().
  325. * This function is for users who don't care what the entry is, only that
  326. * one is present.
  327. *
  328. * Context: Process context. Takes and releases the xa_lock.
  329. * May sleep if the @gfp flags permit.
  330. * Return: 0 if the store succeeded. -EEXIST if another entry was present.
  331. * -ENOMEM if memory could not be allocated.
  332. */
  333. static inline int xa_insert(struct xarray *xa, unsigned long index,
  334. void *entry, gfp_t gfp)
  335. {
  336. void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
  337. if (!curr)
  338. return 0;
  339. if (xa_is_err(curr))
  340. return xa_err(curr);
  341. return -EEXIST;
  342. }
  343. /**
  344. * xa_for_each() - Iterate over a portion of an XArray.
  345. * @xa: XArray.
  346. * @entry: Entry retrieved from array.
  347. * @index: Index of @entry.
  348. * @max: Maximum index to retrieve from array.
  349. * @filter: Selection criterion.
  350. *
  351. * Initialise @index to the lowest index you want to retrieve from the
  352. * array. During the iteration, @entry will have the value of the entry
  353. * stored in @xa at @index. The iteration will skip all entries in the
  354. * array which do not match @filter. You may modify @index during the
  355. * iteration if you want to skip or reprocess indices. It is safe to modify
  356. * the array during the iteration. At the end of the iteration, @entry will
  357. * be set to NULL and @index will have a value less than or equal to max.
  358. *
  359. * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
  360. * to handle your own locking with xas_for_each(), and if you have to unlock
  361. * after each iteration, it will also end up being O(n.log(n)). xa_for_each()
  362. * will spin if it hits a retry entry; if you intend to see retry entries,
  363. * you should use the xas_for_each() iterator instead. The xas_for_each()
  364. * iterator will expand into more inline code than xa_for_each().
  365. *
  366. * Context: Any context. Takes and releases the RCU lock.
  367. */
  368. #define xa_for_each(xa, entry, index, max, filter) \
  369. for (entry = xa_find(xa, &index, max, filter); entry; \
  370. entry = xa_find_after(xa, &index, max, filter))
  371. #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
  372. #define xa_lock(xa) spin_lock(&(xa)->xa_lock)
  373. #define xa_unlock(xa) spin_unlock(&(xa)->xa_lock)
  374. #define xa_lock_bh(xa) spin_lock_bh(&(xa)->xa_lock)
  375. #define xa_unlock_bh(xa) spin_unlock_bh(&(xa)->xa_lock)
  376. #define xa_lock_irq(xa) spin_lock_irq(&(xa)->xa_lock)
  377. #define xa_unlock_irq(xa) spin_unlock_irq(&(xa)->xa_lock)
  378. #define xa_lock_irqsave(xa, flags) \
  379. spin_lock_irqsave(&(xa)->xa_lock, flags)
  380. #define xa_unlock_irqrestore(xa, flags) \
  381. spin_unlock_irqrestore(&(xa)->xa_lock, flags)
  382. /*
  383. * Versions of the normal API which require the caller to hold the
  384. * xa_lock. If the GFP flags allow it, they will drop the lock to
  385. * allocate memory, then reacquire it afterwards. These functions
  386. * may also re-enable interrupts if the XArray flags indicate the
  387. * locking should be interrupt safe.
  388. */
  389. void *__xa_erase(struct xarray *, unsigned long index);
  390. void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
  391. void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
  392. void *entry, gfp_t);
  393. void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
  394. void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
  395. /**
  396. * __xa_insert() - Store this entry in the XArray unless another entry is
  397. * already present.
  398. * @xa: XArray.
  399. * @index: Index into array.
  400. * @entry: New entry.
  401. * @gfp: Memory allocation flags.
  402. *
  403. * If you would rather see the existing entry in the array, use __xa_cmpxchg().
  404. * This function is for users who don't care what the entry is, only that
  405. * one is present.
  406. *
  407. * Context: Any context. Expects xa_lock to be held on entry. May
  408. * release and reacquire xa_lock if the @gfp flags permit.
  409. * Return: 0 if the store succeeded. -EEXIST if another entry was present.
  410. * -ENOMEM if memory could not be allocated.
  411. */
  412. static inline int __xa_insert(struct xarray *xa, unsigned long index,
  413. void *entry, gfp_t gfp)
  414. {
  415. void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
  416. if (!curr)
  417. return 0;
  418. if (xa_is_err(curr))
  419. return xa_err(curr);
  420. return -EEXIST;
  421. }
  422. /**
  423. * xa_erase_bh() - Erase this entry from the XArray.
  424. * @xa: XArray.
  425. * @index: Index of entry.
  426. *
  427. * This function is the equivalent of calling xa_store() with %NULL as
  428. * the third argument. The XArray does not need to allocate memory, so
  429. * the user does not need to provide GFP flags.
  430. *
  431. * Context: Process context. Takes and releases the xa_lock while
  432. * disabling softirqs.
  433. * Return: The entry which used to be at this index.
  434. */
  435. static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
  436. {
  437. void *entry;
  438. xa_lock_bh(xa);
  439. entry = __xa_erase(xa, index);
  440. xa_unlock_bh(xa);
  441. return entry;
  442. }
  443. /**
  444. * xa_erase_irq() - Erase this entry from the XArray.
  445. * @xa: XArray.
  446. * @index: Index of entry.
  447. *
  448. * This function is the equivalent of calling xa_store() with %NULL as
  449. * the third argument. The XArray does not need to allocate memory, so
  450. * the user does not need to provide GFP flags.
  451. *
  452. * Context: Process context. Takes and releases the xa_lock while
  453. * disabling interrupts.
  454. * Return: The entry which used to be at this index.
  455. */
  456. static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
  457. {
  458. void *entry;
  459. xa_lock_irq(xa);
  460. entry = __xa_erase(xa, index);
  461. xa_unlock_irq(xa);
  462. return entry;
  463. }
  464. /* Everything below here is the Advanced API. Proceed with caution. */
  465. /*
  466. * The xarray is constructed out of a set of 'chunks' of pointers. Choosing
  467. * the best chunk size requires some tradeoffs. A power of two recommends
  468. * itself so that we can walk the tree based purely on shifts and masks.
  469. * Generally, the larger the better; as the number of slots per level of the
  470. * tree increases, the less tall the tree needs to be. But that needs to be
  471. * balanced against the memory consumption of each node. On a 64-bit system,
  472. * xa_node is currently 576 bytes, and we get 7 of them per 4kB page. If we
  473. * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
  474. */
  475. #ifndef XA_CHUNK_SHIFT
  476. #define XA_CHUNK_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
  477. #endif
  478. #define XA_CHUNK_SIZE (1UL << XA_CHUNK_SHIFT)
  479. #define XA_CHUNK_MASK (XA_CHUNK_SIZE - 1)
  480. #define XA_MAX_MARKS 3
  481. #define XA_MARK_LONGS DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG)
  482. /*
  483. * @count is the count of every non-NULL element in the ->slots array
  484. * whether that is a value entry, a retry entry, a user pointer,
  485. * a sibling entry or a pointer to the next level of the tree.
  486. * @nr_values is the count of every element in ->slots which is
  487. * either a value entry or a sibling of a value entry.
  488. */
  489. struct xa_node {
  490. unsigned char shift; /* Bits remaining in each slot */
  491. unsigned char offset; /* Slot offset in parent */
  492. unsigned char count; /* Total entry count */
  493. unsigned char nr_values; /* Value entry count */
  494. struct xa_node __rcu *parent; /* NULL at top of tree */
  495. struct xarray *array; /* The array we belong to */
  496. union {
  497. struct list_head private_list; /* For tree user */
  498. struct rcu_head rcu_head; /* Used when freeing node */
  499. };
  500. void __rcu *slots[XA_CHUNK_SIZE];
  501. union {
  502. unsigned long tags[XA_MAX_MARKS][XA_MARK_LONGS];
  503. unsigned long marks[XA_MAX_MARKS][XA_MARK_LONGS];
  504. };
  505. };
  506. void xa_dump(const struct xarray *);
  507. void xa_dump_node(const struct xa_node *);
  508. #ifdef XA_DEBUG
  509. #define XA_BUG_ON(xa, x) do { \
  510. if (x) { \
  511. xa_dump(xa); \
  512. BUG(); \
  513. } \
  514. } while (0)
  515. #define XA_NODE_BUG_ON(node, x) do { \
  516. if (x) { \
  517. if (node) xa_dump_node(node); \
  518. BUG(); \
  519. } \
  520. } while (0)
  521. #else
  522. #define XA_BUG_ON(xa, x) do { } while (0)
  523. #define XA_NODE_BUG_ON(node, x) do { } while (0)
  524. #endif
  525. /* Private */
  526. static inline void *xa_head(const struct xarray *xa)
  527. {
  528. return rcu_dereference_check(xa->xa_head,
  529. lockdep_is_held(&xa->xa_lock));
  530. }
  531. /* Private */
  532. static inline void *xa_head_locked(const struct xarray *xa)
  533. {
  534. return rcu_dereference_protected(xa->xa_head,
  535. lockdep_is_held(&xa->xa_lock));
  536. }
  537. /* Private */
  538. static inline void *xa_entry(const struct xarray *xa,
  539. const struct xa_node *node, unsigned int offset)
  540. {
  541. XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
  542. return rcu_dereference_check(node->slots[offset],
  543. lockdep_is_held(&xa->xa_lock));
  544. }
  545. /* Private */
  546. static inline void *xa_entry_locked(const struct xarray *xa,
  547. const struct xa_node *node, unsigned int offset)
  548. {
  549. XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
  550. return rcu_dereference_protected(node->slots[offset],
  551. lockdep_is_held(&xa->xa_lock));
  552. }
  553. /* Private */
  554. static inline struct xa_node *xa_parent(const struct xarray *xa,
  555. const struct xa_node *node)
  556. {
  557. return rcu_dereference_check(node->parent,
  558. lockdep_is_held(&xa->xa_lock));
  559. }
  560. /* Private */
  561. static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
  562. const struct xa_node *node)
  563. {
  564. return rcu_dereference_protected(node->parent,
  565. lockdep_is_held(&xa->xa_lock));
  566. }
  567. /* Private */
  568. static inline void *xa_mk_node(const struct xa_node *node)
  569. {
  570. return (void *)((unsigned long)node | 2);
  571. }
  572. /* Private */
  573. static inline struct xa_node *xa_to_node(const void *entry)
  574. {
  575. return (struct xa_node *)((unsigned long)entry - 2);
  576. }
  577. /* Private */
  578. static inline bool xa_is_node(const void *entry)
  579. {
  580. return xa_is_internal(entry) && (unsigned long)entry > 4096;
  581. }
  582. /* Private */
  583. static inline void *xa_mk_sibling(unsigned int offset)
  584. {
  585. return xa_mk_internal(offset);
  586. }
  587. /* Private */
  588. static inline unsigned long xa_to_sibling(const void *entry)
  589. {
  590. return xa_to_internal(entry);
  591. }
  592. /**
  593. * xa_is_sibling() - Is the entry a sibling entry?
  594. * @entry: Entry retrieved from the XArray
  595. *
  596. * Return: %true if the entry is a sibling entry.
  597. */
  598. static inline bool xa_is_sibling(const void *entry)
  599. {
  600. return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
  601. (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
  602. }
  603. #define XA_RETRY_ENTRY xa_mk_internal(256)
  604. /**
  605. * xa_is_retry() - Is the entry a retry entry?
  606. * @entry: Entry retrieved from the XArray
  607. *
  608. * Return: %true if the entry is a retry entry.
  609. */
  610. static inline bool xa_is_retry(const void *entry)
  611. {
  612. return unlikely(entry == XA_RETRY_ENTRY);
  613. }
  614. /**
  615. * typedef xa_update_node_t - A callback function from the XArray.
  616. * @node: The node which is being processed
  617. *
  618. * This function is called every time the XArray updates the count of
  619. * present and value entries in a node. It allows advanced users to
  620. * maintain the private_list in the node.
  621. *
  622. * Context: The xa_lock is held and interrupts may be disabled.
  623. * Implementations should not drop the xa_lock, nor re-enable
  624. * interrupts.
  625. */
  626. typedef void (*xa_update_node_t)(struct xa_node *node);
  627. /*
  628. * The xa_state is opaque to its users. It contains various different pieces
  629. * of state involved in the current operation on the XArray. It should be
  630. * declared on the stack and passed between the various internal routines.
  631. * The various elements in it should not be accessed directly, but only
  632. * through the provided accessor functions. The below documentation is for
  633. * the benefit of those working on the code, not for users of the XArray.
  634. *
  635. * @xa_node usually points to the xa_node containing the slot we're operating
  636. * on (and @xa_offset is the offset in the slots array). If there is a
  637. * single entry in the array at index 0, there are no allocated xa_nodes to
  638. * point to, and so we store %NULL in @xa_node. @xa_node is set to
  639. * the value %XAS_RESTART if the xa_state is not walked to the correct
  640. * position in the tree of nodes for this operation. If an error occurs
  641. * during an operation, it is set to an %XAS_ERROR value. If we run off the
  642. * end of the allocated nodes, it is set to %XAS_BOUNDS.
  643. */
  644. struct xa_state {
  645. struct xarray *xa;
  646. unsigned long xa_index;
  647. unsigned char xa_shift;
  648. unsigned char xa_sibs;
  649. unsigned char xa_offset;
  650. unsigned char xa_pad; /* Helps gcc generate better code */
  651. struct xa_node *xa_node;
  652. struct xa_node *xa_alloc;
  653. xa_update_node_t xa_update;
  654. };
  655. /*
  656. * We encode errnos in the xas->xa_node. If an error has happened, we need to
  657. * drop the lock to fix it, and once we've done so the xa_state is invalid.
  658. */
  659. #define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
  660. #define XAS_BOUNDS ((struct xa_node *)1UL)
  661. #define XAS_RESTART ((struct xa_node *)3UL)
  662. #define __XA_STATE(array, index, shift, sibs) { \
  663. .xa = array, \
  664. .xa_index = index, \
  665. .xa_shift = shift, \
  666. .xa_sibs = sibs, \
  667. .xa_offset = 0, \
  668. .xa_pad = 0, \
  669. .xa_node = XAS_RESTART, \
  670. .xa_alloc = NULL, \
  671. .xa_update = NULL \
  672. }
  673. /**
  674. * XA_STATE() - Declare an XArray operation state.
  675. * @name: Name of this operation state (usually xas).
  676. * @array: Array to operate on.
  677. * @index: Initial index of interest.
  678. *
  679. * Declare and initialise an xa_state on the stack.
  680. */
  681. #define XA_STATE(name, array, index) \
  682. struct xa_state name = __XA_STATE(array, index, 0, 0)
  683. /**
  684. * XA_STATE_ORDER() - Declare an XArray operation state.
  685. * @name: Name of this operation state (usually xas).
  686. * @array: Array to operate on.
  687. * @index: Initial index of interest.
  688. * @order: Order of entry.
  689. *
  690. * Declare and initialise an xa_state on the stack. This variant of
  691. * XA_STATE() allows you to specify the 'order' of the element you
  692. * want to operate on.`
  693. */
  694. #define XA_STATE_ORDER(name, array, index, order) \
  695. struct xa_state name = __XA_STATE(array, \
  696. (index >> order) << order, \
  697. order - (order % XA_CHUNK_SHIFT), \
  698. (1U << (order % XA_CHUNK_SHIFT)) - 1)
  699. #define xas_marked(xas, mark) xa_marked((xas)->xa, (mark))
  700. #define xas_trylock(xas) xa_trylock((xas)->xa)
  701. #define xas_lock(xas) xa_lock((xas)->xa)
  702. #define xas_unlock(xas) xa_unlock((xas)->xa)
  703. #define xas_lock_bh(xas) xa_lock_bh((xas)->xa)
  704. #define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa)
  705. #define xas_lock_irq(xas) xa_lock_irq((xas)->xa)
  706. #define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa)
  707. #define xas_lock_irqsave(xas, flags) \
  708. xa_lock_irqsave((xas)->xa, flags)
  709. #define xas_unlock_irqrestore(xas, flags) \
  710. xa_unlock_irqrestore((xas)->xa, flags)
  711. /**
  712. * xas_error() - Return an errno stored in the xa_state.
  713. * @xas: XArray operation state.
  714. *
  715. * Return: 0 if no error has been noted. A negative errno if one has.
  716. */
  717. static inline int xas_error(const struct xa_state *xas)
  718. {
  719. return xa_err(xas->xa_node);
  720. }
  721. /**
  722. * xas_set_err() - Note an error in the xa_state.
  723. * @xas: XArray operation state.
  724. * @err: Negative error number.
  725. *
  726. * Only call this function with a negative @err; zero or positive errors
  727. * will probably not behave the way you think they should. If you want
  728. * to clear the error from an xa_state, use xas_reset().
  729. */
  730. static inline void xas_set_err(struct xa_state *xas, long err)
  731. {
  732. xas->xa_node = XA_ERROR(err);
  733. }
  734. /**
  735. * xas_invalid() - Is the xas in a retry or error state?
  736. * @xas: XArray operation state.
  737. *
  738. * Return: %true if the xas cannot be used for operations.
  739. */
  740. static inline bool xas_invalid(const struct xa_state *xas)
  741. {
  742. return (unsigned long)xas->xa_node & 3;
  743. }
  744. /**
  745. * xas_valid() - Is the xas a valid cursor into the array?
  746. * @xas: XArray operation state.
  747. *
  748. * Return: %true if the xas can be used for operations.
  749. */
  750. static inline bool xas_valid(const struct xa_state *xas)
  751. {
  752. return !xas_invalid(xas);
  753. }
  754. /* True if the pointer is something other than a node */
  755. static inline bool xas_not_node(struct xa_node *node)
  756. {
  757. return ((unsigned long)node & 3) || !node;
  758. }
  759. /* True if the node represents head-of-tree, RESTART or BOUNDS */
  760. static inline bool xas_top(struct xa_node *node)
  761. {
  762. return node <= XAS_RESTART;
  763. }
  764. /**
  765. * xas_reset() - Reset an XArray operation state.
  766. * @xas: XArray operation state.
  767. *
  768. * Resets the error or walk state of the @xas so future walks of the
  769. * array will start from the root. Use this if you have dropped the
  770. * xarray lock and want to reuse the xa_state.
  771. *
  772. * Context: Any context.
  773. */
  774. static inline void xas_reset(struct xa_state *xas)
  775. {
  776. xas->xa_node = XAS_RESTART;
  777. }
  778. /**
  779. * xas_retry() - Retry the operation if appropriate.
  780. * @xas: XArray operation state.
  781. * @entry: Entry from xarray.
  782. *
  783. * The advanced functions may sometimes return an internal entry, such as
  784. * a retry entry or a zero entry. This function sets up the @xas to restart
  785. * the walk from the head of the array if needed.
  786. *
  787. * Context: Any context.
  788. * Return: true if the operation needs to be retried.
  789. */
  790. static inline bool xas_retry(struct xa_state *xas, const void *entry)
  791. {
  792. if (!xa_is_retry(entry))
  793. return false;
  794. xas_reset(xas);
  795. return true;
  796. }
/* The out-of-line advanced operations; presumably implemented in lib/xarray.c */
void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);
void *xas_find(struct xa_state *, unsigned long max);

bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);

bool xas_nomem(struct xa_state *, gfp_t);
void xas_pause(struct xa_state *);
  807. /**
  808. * xas_reload() - Refetch an entry from the xarray.
  809. * @xas: XArray operation state.
  810. *
  811. * Use this function to check that a previously loaded entry still has
  812. * the same value. This is useful for the lockless pagecache lookup where
  813. * we walk the array with only the RCU lock to protect us, lock the page,
  814. * then check that the page hasn't moved since we looked it up.
  815. *
  816. * The caller guarantees that @xas is still valid. If it may be in an
  817. * error or restart state, call xas_load() instead.
  818. *
  819. * Return: The entry at this location in the xarray.
  820. */
  821. static inline void *xas_reload(struct xa_state *xas)
  822. {
  823. struct xa_node *node = xas->xa_node;
  824. if (node)
  825. return xa_entry(xas->xa, node, xas->xa_offset);
  826. return xa_head(xas->xa);
  827. }
  828. /**
  829. * xas_set() - Set up XArray operation state for a different index.
  830. * @xas: XArray operation state.
  831. * @index: New index into the XArray.
  832. *
  833. * Move the operation state to refer to a different index. This will
  834. * have the effect of starting a walk from the top; see xas_next()
  835. * to move to an adjacent index.
  836. */
  837. static inline void xas_set(struct xa_state *xas, unsigned long index)
  838. {
  839. xas->xa_index = index;
  840. xas->xa_node = XAS_RESTART;
  841. }
  842. /**
  843. * xas_set_order() - Set up XArray operation state for a multislot entry.
  844. * @xas: XArray operation state.
  845. * @index: Target of the operation.
  846. * @order: Entry occupies 2^@order indices.
  847. */
  848. static inline void xas_set_order(struct xa_state *xas, unsigned long index,
  849. unsigned int order)
  850. {
  851. #ifdef CONFIG_XARRAY_MULTI
  852. xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
  853. xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
  854. xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
  855. xas->xa_node = XAS_RESTART;
  856. #else
  857. BUG_ON(order > 0);
  858. xas_set(xas, index);
  859. #endif
  860. }
  861. /**
  862. * xas_set_update() - Set up XArray operation state for a callback.
  863. * @xas: XArray operation state.
  864. * @update: Function to call when updating a node.
  865. *
  866. * The XArray can notify a caller after it has updated an xa_node.
  867. * This is advanced functionality and is only needed by the page cache.
  868. */
  869. static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
  870. {
  871. xas->xa_update = update;
  872. }
  873. /**
  874. * xas_next_entry() - Advance iterator to next present entry.
  875. * @xas: XArray operation state.
  876. * @max: Highest index to return.
  877. *
  878. * xas_next_entry() is an inline function to optimise xarray traversal for
  879. * speed. It is equivalent to calling xas_find(), and will call xas_find()
  880. * for all the hard cases.
  881. *
  882. * Return: The next present entry after the one currently referred to by @xas.
  883. */
  884. static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
  885. {
  886. struct xa_node *node = xas->xa_node;
  887. void *entry;
  888. if (unlikely(xas_not_node(node) || node->shift ||
  889. xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
  890. return xas_find(xas, max);
  891. do {
  892. if (unlikely(xas->xa_index >= max))
  893. return xas_find(xas, max);
  894. if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
  895. return xas_find(xas, max);
  896. entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
  897. if (unlikely(xa_is_internal(entry)))
  898. return xas_find(xas, max);
  899. xas->xa_offset++;
  900. xas->xa_index++;
  901. } while (!entry);
  902. return entry;
  903. }
/* Private */
static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
		xa_mark_t mark)
{
	/* Bitmap of this mark for every slot in the current node */
	unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
	unsigned int offset = xas->xa_offset;

	/* When advancing, start the search after the current slot */
	if (advance)
		offset++;
	if (XA_CHUNK_SIZE == BITS_PER_LONG) {
		/* Whole chunk fits in one word; search it with a mask + ffs */
		if (offset < XA_CHUNK_SIZE) {
			unsigned long data = *addr & (~0UL << offset);
			if (data)
				return __ffs(data);
		}
		/* XA_CHUNK_SIZE means "no marked slot found" */
		return XA_CHUNK_SIZE;
	}

	return find_next_bit(addr, XA_CHUNK_SIZE, offset);
}
  922. /**
  923. * xas_next_marked() - Advance iterator to next marked entry.
  924. * @xas: XArray operation state.
  925. * @max: Highest index to return.
  926. * @mark: Mark to search for.
  927. *
  928. * xas_next_marked() is an inline function to optimise xarray traversal for
  929. * speed. It is equivalent to calling xas_find_marked(), and will call
  930. * xas_find_marked() for all the hard cases.
  931. *
  932. * Return: The next marked entry after the one currently referred to by @xas.
  933. */
  934. static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
  935. xa_mark_t mark)
  936. {
  937. struct xa_node *node = xas->xa_node;
  938. unsigned int offset;
  939. if (unlikely(xas_not_node(node) || node->shift))
  940. return xas_find_marked(xas, max, mark);
  941. offset = xas_find_chunk(xas, true, mark);
  942. xas->xa_offset = offset;
  943. xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
  944. if (xas->xa_index > max)
  945. return NULL;
  946. if (offset == XA_CHUNK_SIZE)
  947. return xas_find_marked(xas, max, mark);
  948. return xa_entry(xas->xa, node, offset);
  949. }
  950. /*
  951. * If iterating while holding a lock, drop the lock and reschedule
  952. * every %XA_CHECK_SCHED loops.
  953. */
  954. enum {
  955. XA_CHECK_SCHED = 4096,
  956. };
  957. /**
  958. * xas_for_each() - Iterate over a range of an XArray.
  959. * @xas: XArray operation state.
  960. * @entry: Entry retrieved from the array.
  961. * @max: Maximum index to retrieve from array.
  962. *
  963. * The loop body will be executed for each entry present in the xarray
  964. * between the current xas position and @max. @entry will be set to
  965. * the entry retrieved from the xarray. It is safe to delete entries
  966. * from the array in the loop body. You should hold either the RCU lock
  967. * or the xa_lock while iterating. If you need to drop the lock, call
  968. * xas_pause() first.
  969. */
  970. #define xas_for_each(xas, entry, max) \
  971. for (entry = xas_find(xas, max); entry; \
  972. entry = xas_next_entry(xas, max))
  973. /**
  974. * xas_for_each_marked() - Iterate over a range of an XArray.
  975. * @xas: XArray operation state.
  976. * @entry: Entry retrieved from the array.
  977. * @max: Maximum index to retrieve from array.
  978. * @mark: Mark to search for.
  979. *
  980. * The loop body will be executed for each marked entry in the xarray
  981. * between the current xas position and @max. @entry will be set to
  982. * the entry retrieved from the xarray. It is safe to delete entries
  983. * from the array in the loop body. You should hold either the RCU lock
  984. * or the xa_lock while iterating. If you need to drop the lock, call
  985. * xas_pause() first.
  986. */
  987. #define xas_for_each_marked(xas, entry, max, mark) \
  988. for (entry = xas_find_marked(xas, max, mark); entry; \
  989. entry = xas_next_marked(xas, max, mark))
  990. #endif /* _LINUX_XARRAY_H */