/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_XARRAY_H
#define _LINUX_XARRAY_H
/*
 * eXtensible Arrays
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 *
 * See Documentation/core-api/xarray.rst for how to use the XArray.
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
/*
 * The bottom two bits of the entry determine how the XArray interprets
 * the contents:
 *
 * 00: Pointer entry
 * 10: Internal entry
 * x1: Value entry or tagged pointer
 *
 * Attempting to store internal entries in the XArray is a bug.
 *
 * Most internal entries are pointers to the next node in the tree.
 * The following internal entries have a special meaning:
 *
 * 0-62: Sibling entries
 * 256: Retry entry
 *
 * Errors are also represented as internal entries, but use the negative
 * space (-4094 to -2).  They're never stored in the slots array; only
 * returned by the normal API.
 */

/* One bit is sacrificed for the value tag, so a value entry can hold
 * an unsigned integer of at most BITS_PER_LONG - 1 bits. */
#define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)
  40. /**
  41. * xa_mk_value() - Create an XArray entry from an integer.
  42. * @v: Value to store in XArray.
  43. *
  44. * Context: Any context.
  45. * Return: An entry suitable for storing in the XArray.
  46. */
  47. static inline void *xa_mk_value(unsigned long v)
  48. {
  49. WARN_ON((long)v < 0);
  50. return (void *)((v << 1) | 1);
  51. }
  52. /**
  53. * xa_to_value() - Get value stored in an XArray entry.
  54. * @entry: XArray entry.
  55. *
  56. * Context: Any context.
  57. * Return: The value stored in the XArray entry.
  58. */
  59. static inline unsigned long xa_to_value(const void *entry)
  60. {
  61. return (unsigned long)entry >> 1;
  62. }
  63. /**
  64. * xa_is_value() - Determine if an entry is a value.
  65. * @entry: XArray entry.
  66. *
  67. * Context: Any context.
  68. * Return: True if the entry is a value, false if it is a pointer.
  69. */
  70. static inline bool xa_is_value(const void *entry)
  71. {
  72. return (unsigned long)entry & 1;
  73. }
  74. /**
  75. * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
  76. * @p: Plain pointer.
  77. * @tag: Tag value (0, 1 or 3).
  78. *
  79. * If the user of the XArray prefers, they can tag their pointers instead
  80. * of storing value entries. Three tags are available (0, 1 and 3).
  81. * These are distinct from the xa_mark_t as they are not replicated up
  82. * through the array and cannot be searched for.
  83. *
  84. * Context: Any context.
  85. * Return: An XArray entry.
  86. */
  87. static inline void *xa_tag_pointer(void *p, unsigned long tag)
  88. {
  89. return (void *)((unsigned long)p | tag);
  90. }
  91. /**
  92. * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
  93. * @entry: XArray entry.
  94. *
  95. * If you have stored a tagged pointer in the XArray, call this function
  96. * to get the untagged version of the pointer.
  97. *
  98. * Context: Any context.
  99. * Return: A pointer.
  100. */
  101. static inline void *xa_untag_pointer(void *entry)
  102. {
  103. return (void *)((unsigned long)entry & ~3UL);
  104. }
  105. /**
  106. * xa_pointer_tag() - Get the tag stored in an XArray entry.
  107. * @entry: XArray entry.
  108. *
  109. * If you have stored a tagged pointer in the XArray, call this function
  110. * to get the tag of that pointer.
  111. *
  112. * Context: Any context.
  113. * Return: A tag.
  114. */
  115. static inline unsigned int xa_pointer_tag(void *entry)
  116. {
  117. return (unsigned long)entry & 3UL;
  118. }
  119. /*
  120. * xa_mk_internal() - Create an internal entry.
  121. * @v: Value to turn into an internal entry.
  122. *
  123. * Context: Any context.
  124. * Return: An XArray internal entry corresponding to this value.
  125. */
  126. static inline void *xa_mk_internal(unsigned long v)
  127. {
  128. return (void *)((v << 2) | 2);
  129. }
  130. /*
  131. * xa_to_internal() - Extract the value from an internal entry.
  132. * @entry: XArray entry.
  133. *
  134. * Context: Any context.
  135. * Return: The value which was stored in the internal entry.
  136. */
  137. static inline unsigned long xa_to_internal(const void *entry)
  138. {
  139. return (unsigned long)entry >> 2;
  140. }
  141. /*
  142. * xa_is_internal() - Is the entry an internal entry?
  143. * @entry: XArray entry.
  144. *
  145. * Context: Any context.
  146. * Return: %true if the entry is an internal entry.
  147. */
  148. static inline bool xa_is_internal(const void *entry)
  149. {
  150. return ((unsigned long)entry & 3) == 2;
  151. }
/**
 * xa_is_err() - Report whether an XArray operation returned an error
 * @entry: Result from calling an XArray function
 *
 * If an XArray operation cannot complete an operation, it will return
 * a special value indicating an error.  This function tells you
 * whether an error occurred; xa_err() tells you which error occurred.
 *
 * Context: Any context.
 * Return: %true if the entry indicates an error.
 */
static inline bool xa_is_err(const void *entry)
{
	return unlikely(xa_is_internal(entry));
}

/**
 * xa_err() - Turn an XArray result into an errno.
 * @entry: Result from calling an XArray function.
 *
 * If an XArray operation cannot complete an operation, it will return
 * a special pointer value which encodes an errno.  This function extracts
 * the errno from the pointer value, or returns 0 if the pointer does not
 * represent an errno.
 *
 * Context: Any context.
 * Return: A negative errno or 0.
 */
static inline int xa_err(void *entry)
{
	/* xa_to_internal() would not do sign extension. */
	/* NOTE: relies on an arithmetic (sign-extending) right shift of a
	 * negative long — implementation-defined in ISO C, but guaranteed
	 * by every compiler the kernel supports. */
	if (xa_is_err(entry))
		return (long)entry >> 2;
	return 0;
}
/* Marks are per-entry bits replicated up the tree so searches can prune. */
typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0		((__force xa_mark_t)0U)
#define XA_MARK_1		((__force xa_mark_t)1U)
#define XA_MARK_2		((__force xa_mark_t)2U)
/* Not a real mark; used to search for any present entry. */
#define XA_PRESENT		((__force xa_mark_t)8U)
#define XA_MARK_MAX		XA_MARK_2

/* Which spinlock flavour the array's modifications require. */
enum xa_lock_type {
	XA_LOCK_IRQ = 1,
	XA_LOCK_BH = 2,
};

/*
 * Values for xa_flags.  The radix tree stores its GFP flags in the xa_flags,
 * and we remain compatible with that.
 */
#define XA_FLAGS_LOCK_IRQ	((__force gfp_t)XA_LOCK_IRQ)
#define XA_FLAGS_LOCK_BH	((__force gfp_t)XA_LOCK_BH)
/* Mark bits live above the GFP bits within xa_flags. */
#define XA_FLAGS_MARK(mark)	((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
						(__force unsigned)(mark)))
/**
 * struct xarray - The anchor of the XArray.
 * @xa_lock: Lock that protects the contents of the XArray.
 *
 * To use the xarray, define it statically or embed it in your data structure.
 * It is a very small data structure, so it does not usually make sense to
 * allocate it separately and keep a pointer to it in your data structure.
 *
 * You may use the xa_lock to protect your own data structures as well.
 */
/*
 * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
 * If the only non-NULL entry in the array is at index 0, @xa_head is that
 * entry.  If any other entry in the array is non-NULL, @xa_head points
 * to an @xa_node.
 */
struct xarray {
	spinlock_t	xa_lock;
/* private: The rest of the data structure is not to be used directly. */
	gfp_t		xa_flags;	/* Lock type and mark summary bits */
	void __rcu *	xa_head;	/* NULL, single entry, or xa_node */
};
/* Static initialiser; prefer DEFINE_XARRAY()/DEFINE_XARRAY_FLAGS(). */
#define XARRAY_INIT(name, flags) {				\
	.xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),		\
	.xa_flags = flags,					\
	.xa_head = NULL,					\
}

/**
 * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
 * @name: A string that names your XArray.
 * @flags: XA_FLAG values.
 *
 * This is intended for file scope definitions of XArrays.  It declares
 * and initialises an empty XArray with the chosen name and flags.  It is
 * equivalent to calling xa_init_flags() on the array, but it does the
 * initialisation at compiletime instead of runtime.
 */
#define DEFINE_XARRAY_FLAGS(name, flags)			\
	struct xarray name = XARRAY_INIT(name, flags)

/**
 * DEFINE_XARRAY() - Define an XArray.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of XArrays.  It declares
 * and initialises an empty XArray with the chosen name.  It is equivalent
 * to calling xa_init() on the array, but it does the initialisation at
 * compiletime instead of runtime.
 */
#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)
/* Normal API: these take and release the xa_lock internally. */
void xa_init_flags(struct xarray *, gfp_t flags);
void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
/**
 * xa_init() - Initialise an empty XArray.
 * @xa: XArray.
 *
 * An empty XArray is full of NULL entries.
 *
 * Context: Any context.
 */
static inline void xa_init(struct xarray *xa)
{
	xa_init_flags(xa, 0);
}
  271. /**
  272. * xa_empty() - Determine if an array has any present entries.
  273. * @xa: XArray.
  274. *
  275. * Context: Any context.
  276. * Return: %true if the array contains only NULL pointers.
  277. */
  278. static inline bool xa_empty(const struct xarray *xa)
  279. {
  280. return xa->xa_head == NULL;
  281. }
  282. /**
  283. * xa_marked() - Inquire whether any entry in this array has a mark set
  284. * @xa: Array
  285. * @mark: Mark value
  286. *
  287. * Context: Any context.
  288. * Return: %true if any entry has this mark set.
  289. */
  290. static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
  291. {
  292. return xa->xa_flags & XA_FLAGS_MARK(mark);
  293. }
/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * This function is the equivalent of calling xa_store() with %NULL as
 * the third argument.  The XArray does not need to allocate memory, so
 * the user does not need to provide GFP flags.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase(struct xarray *xa, unsigned long index)
{
	return xa_store(xa, index, NULL, 0);
}
/* Convenience wrappers around the embedded xa_lock spinlock. */
#define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
#define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
#define xa_lock_bh(xa)		spin_lock_bh(&(xa)->xa_lock)
#define xa_unlock_bh(xa)	spin_unlock_bh(&(xa)->xa_lock)
#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)
#define xa_lock_irqsave(xa, flags) \
				spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
				spin_unlock_irqrestore(&(xa)->xa_lock, flags)
/*
 * Versions of the normal API which require the caller to hold the
 * xa_lock.  If the GFP flags allow it, they will drop the lock to
 * allocate memory, then reacquire it afterwards.  These functions
 * may also re-enable interrupts if the XArray flags indicate the
 * locking should be interrupt safe.
 */
void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
/**
 * xa_erase_bh() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * This function is the equivalent of calling xa_store() with %NULL as
 * the third argument.  The XArray does not need to allocate memory, so
 * the user does not need to provide GFP flags.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_bh(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_bh(xa);

	return entry;
}
/**
 * xa_erase_irq() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * This function is the equivalent of calling xa_store() with %NULL as
 * the third argument.  The XArray does not need to allocate memory, so
 * the user does not need to provide GFP flags.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_irq(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_irq(xa);

	return entry;
}
/* Everything below here is the Advanced API.  Proceed with caution. */

/*
 * The xarray is constructed out of a set of 'chunks' of pointers.  Choosing
 * the best chunk size requires some tradeoffs.  A power of two recommends
 * itself so that we can walk the tree based purely on shifts and masks.
 * Generally, the larger the better; as the number of slots per level of the
 * tree increases, the less tall the tree needs to be.  But that needs to be
 * balanced against the memory consumption of each node.  On a 64-bit system,
 * xa_node is currently 576 bytes, and we get 7 of them per 4kB page.  If we
 * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
 */
#ifndef XA_CHUNK_SHIFT
#define XA_CHUNK_SHIFT		(CONFIG_BASE_SMALL ? 4 : 6)
#endif
#define XA_CHUNK_SIZE		(1UL << XA_CHUNK_SHIFT)
#define XA_CHUNK_MASK		(XA_CHUNK_SIZE - 1)
/* Three mark bits per entry; each mark is one bitmap per node. */
#define XA_MAX_MARKS		3
#define XA_MARK_LONGS		DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG)
/*
 * @count is the count of every non-NULL element in the ->slots array
 * whether that is a value entry, a retry entry, a user pointer,
 * a sibling entry or a pointer to the next level of the tree.
 * @nr_values is the count of every element in ->slots which is
 * either a value entry or a sibling of a value entry.
 */
struct xa_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned char	count;		/* Total entry count */
	unsigned char	nr_values;	/* Value entry count */
	struct xa_node __rcu *parent;	/* NULL at top of tree */
	struct xarray	*array;		/* The array we belong to */
	union {
		struct list_head private_list;	/* For tree user */
		struct rcu_head	rcu_head;	/* Used when freeing node */
	};
	void __rcu	*slots[XA_CHUNK_SIZE];
	union {
		unsigned long	tags[XA_MAX_MARKS][XA_MARK_LONGS];
		unsigned long	marks[XA_MAX_MARKS][XA_MARK_LONGS];
	};
};
/* Debug helpers; dump the tree to the console. */
void xa_dump(const struct xarray *);
void xa_dump_node(const struct xa_node *);

#ifdef XA_DEBUG
/* Dump the array before dying so the corrupt state is visible. */
#define XA_BUG_ON(xa, x) do {					\
		if (x) {					\
			xa_dump(xa);				\
			BUG();					\
		}						\
	} while (0)
#define XA_NODE_BUG_ON(node, x) do {				\
		if (x) {					\
			if (node) xa_dump_node(node);		\
			BUG();					\
		}						\
	} while (0)
#else
#define XA_BUG_ON(xa, x)	do { } while (0)
#define XA_NODE_BUG_ON(node, x)	do { } while (0)
#endif
/* Private: dereference the head; caller holds RCU read lock or xa_lock */
static inline void *xa_head(const struct xarray *xa)
{
	return rcu_dereference_check(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private: dereference the head; caller must hold the xa_lock */
static inline void *xa_head_locked(const struct xarray *xa)
{
	return rcu_dereference_protected(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private: fetch a slot; caller holds RCU read lock or xa_lock */
static inline void *xa_entry(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_check(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private: fetch a slot; caller must hold the xa_lock */
static inline void *xa_entry_locked(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_protected(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private: fetch the parent node; caller holds RCU read lock or xa_lock */
static inline struct xa_node *xa_parent(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_check(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private: fetch the parent node; caller must hold the xa_lock */
static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_protected(node->parent,
						lockdep_is_held(&xa->xa_lock));
}
  477. /* Private */
  478. static inline void *xa_mk_node(const struct xa_node *node)
  479. {
  480. return (void *)((unsigned long)node | 2);
  481. }
  482. /* Private */
  483. static inline struct xa_node *xa_to_node(const void *entry)
  484. {
  485. return (struct xa_node *)((unsigned long)entry - 2);
  486. }
/* Private: internal entries above 4096 are node pointers (kernel pointers
 * never point into the first page); those at or below are special values. */
static inline bool xa_is_node(const void *entry)
{
	return xa_is_internal(entry) && (unsigned long)entry > 4096;
}

/* Private */
static inline void *xa_mk_sibling(unsigned int offset)
{
	return xa_mk_internal(offset);
}

/* Private */
static inline unsigned long xa_to_sibling(const void *entry)
{
	return xa_to_internal(entry);
}

/**
 * xa_is_sibling() - Is the entry a sibling entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a sibling entry.
 */
static inline bool xa_is_sibling(const void *entry)
{
	/* Sibling entries only exist when multi-index entries are enabled. */
	return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
}
/* Stored in a slot while a node is being shrunk; tells an RCU reader
 * to restart the walk from the head of the array. */
#define XA_RETRY_ENTRY		xa_mk_internal(256)

/**
 * xa_is_retry() - Is the entry a retry entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a retry entry.
 */
static inline bool xa_is_retry(const void *entry)
{
	return unlikely(entry == XA_RETRY_ENTRY);
}
/**
 * typedef xa_update_node_t - A callback function from the XArray.
 * @node: The node which is being processed
 *
 * This function is called every time the XArray updates the count of
 * present and value entries in a node.  It allows advanced users to
 * maintain the private_list in the node.
 *
 * Context: The xa_lock is held and interrupts may be disabled.
 * Implementations should not drop the xa_lock, nor re-enable
 * interrupts.
 */
typedef void (*xa_update_node_t)(struct xa_node *node);
/*
 * The xa_state is opaque to its users.  It contains various different pieces
 * of state involved in the current operation on the XArray.  It should be
 * declared on the stack and passed between the various internal routines.
 * The various elements in it should not be accessed directly, but only
 * through the provided accessor functions.  The below documentation is for
 * the benefit of those working on the code, not for users of the XArray.
 *
 * @xa_node usually points to the xa_node containing the slot we're operating
 * on (and @xa_offset is the offset in the slots array).  If there is a
 * single entry in the array at index 0, there are no allocated xa_nodes to
 * point to, and so we store %NULL in @xa_node.  @xa_node is set to
 * the value %XAS_RESTART if the xa_state is not walked to the correct
 * position in the tree of nodes for this operation.  If an error occurs
 * during an operation, it is set to an %XAS_ERROR value.  If we run off the
 * end of the allocated nodes, it is set to %XAS_BOUNDS.
 */
struct xa_state {
	struct xarray *xa;		/* Array being operated on */
	unsigned long xa_index;		/* Target index */
	unsigned char xa_shift;		/* Shift of the targeted level */
	unsigned char xa_sibs;		/* Number of trailing sibling slots */
	unsigned char xa_offset;	/* Slot offset within xa_node */
	unsigned char xa_pad;		/* Helps gcc generate better code */
	struct xa_node *xa_node;	/* Node, or RESTART/BOUNDS/ERROR */
	struct xa_node *xa_alloc;	/* Preallocated node, if any */
	xa_update_node_t xa_update;	/* Node-update callback, if any */
};

/*
 * We encode errnos in the xas->xa_node.  If an error has happened, we need to
 * drop the lock to fix it, and once we've done so the xa_state is invalid.
 */
#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
#define XAS_BOUNDS	((struct xa_node *)1UL)
#define XAS_RESTART	((struct xa_node *)3UL)

#define __XA_STATE(array, index, shift, sibs)  {	\
	.xa = array,					\
	.xa_index = index,				\
	.xa_shift = shift,				\
	.xa_sibs = sibs,				\
	.xa_offset = 0,					\
	.xa_pad = 0,					\
	.xa_node = XAS_RESTART,				\
	.xa_alloc = NULL,				\
	.xa_update = NULL				\
}
  583. /**
  584. * XA_STATE() - Declare an XArray operation state.
  585. * @name: Name of this operation state (usually xas).
  586. * @array: Array to operate on.
  587. * @index: Initial index of interest.
  588. *
  589. * Declare and initialise an xa_state on the stack.
  590. */
  591. #define XA_STATE(name, array, index) \
  592. struct xa_state name = __XA_STATE(array, index, 0, 0)
  593. /**
  594. * XA_STATE_ORDER() - Declare an XArray operation state.
  595. * @name: Name of this operation state (usually xas).
  596. * @array: Array to operate on.
  597. * @index: Initial index of interest.
  598. * @order: Order of entry.
  599. *
  600. * Declare and initialise an xa_state on the stack. This variant of
  601. * XA_STATE() allows you to specify the 'order' of the element you
  602. * want to operate on.`
  603. */
  604. #define XA_STATE_ORDER(name, array, index, order) \
  605. struct xa_state name = __XA_STATE(array, \
  606. (index >> order) << order, \
  607. order - (order % XA_CHUNK_SHIFT), \
  608. (1U << (order % XA_CHUNK_SHIFT)) - 1)
/* Forward the lock/mark helpers from the xa_state to its array. */
#define xas_marked(xas, mark)	xa_marked((xas)->xa, (mark))
#define xas_trylock(xas)	xa_trylock((xas)->xa)
#define xas_lock(xas)		xa_lock((xas)->xa)
#define xas_unlock(xas)		xa_unlock((xas)->xa)
#define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
#define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
#define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
#define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
#define xas_lock_irqsave(xas, flags) \
				xa_lock_irqsave((xas)->xa, flags)
#define xas_unlock_irqrestore(xas, flags) \
				xa_unlock_irqrestore((xas)->xa, flags)
/**
 * xas_error() - Return an errno stored in the xa_state.
 * @xas: XArray operation state.
 *
 * Return: 0 if no error has been noted.  A negative errno if one has.
 */
static inline int xas_error(const struct xa_state *xas)
{
	/* Errors are encoded in xa_node as internal-entry pointers. */
	return xa_err(xas->xa_node);
}

/**
 * xas_set_err() - Note an error in the xa_state.
 * @xas: XArray operation state.
 * @err: Negative error number.
 *
 * Only call this function with a negative @err; zero or positive errors
 * will probably not behave the way you think they should.  If you want
 * to clear the error from an xa_state, use xas_reset().
 */
static inline void xas_set_err(struct xa_state *xas, long err)
{
	xas->xa_node = XA_ERROR(err);
}
/**
 * xas_invalid() - Is the xas in a retry or error state?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas cannot be used for operations.
 */
static inline bool xas_invalid(const struct xa_state *xas)
{
	/* All special xa_node values (ERROR/BOUNDS/RESTART) have low bits set;
	 * real node pointers are at least 4-byte aligned. */
	return (unsigned long)xas->xa_node & 3;
}

/**
 * xas_valid() - Is the xas a valid cursor into the array?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas can be used for operations.
 */
static inline bool xas_valid(const struct xa_state *xas)
{
	return !xas_invalid(xas);
}

/* True if the pointer is something other than a node */
static inline bool xas_not_node(struct xa_node *node)
{
	return ((unsigned long)node & 3) || !node;
}

/* True if the node represents head-of-tree, RESTART or BOUNDS */
static inline bool xas_top(struct xa_node *node)
{
	/* NULL (0), XAS_BOUNDS (1) and XAS_RESTART (3) all compare below
	 * any real node pointer. */
	return node <= XAS_RESTART;
}
/**
 * xas_reset() - Reset an XArray operation state.
 * @xas: XArray operation state.
 *
 * Resets the error or walk state of the @xas so future walks of the
 * array will start from the root.  Use this if you have dropped the
 * xarray lock and want to reuse the xa_state.
 *
 * Context: Any context.
 */
static inline void xas_reset(struct xa_state *xas)
{
	xas->xa_node = XAS_RESTART;
}

/**
 * xas_retry() - Retry the operation if appropriate.
 * @xas: XArray operation state.
 * @entry: Entry from xarray.
 *
 * The advanced functions may sometimes return an internal entry, such as
 * a retry entry or a zero entry.  This function sets up the @xas to restart
 * the walk from the head of the array if needed.
 *
 * Context: Any context.
 * Return: true if the operation needs to be retried.
 */
static inline bool xas_retry(struct xa_state *xas, const void *entry)
{
	if (!xa_is_retry(entry))
		return false;
	xas_reset(xas);
	return true;
}
/* Advanced API: caller is responsible for locking and for xas_nomem(). */
void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);
bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void xas_init_marks(const struct xa_state *);
bool xas_nomem(struct xa_state *, gfp_t);
/**
 * xas_reload() - Refetch an entry from the xarray.
 * @xas: XArray operation state.
 *
 * Use this function to check that a previously loaded entry still has
 * the same value.  This is useful for the lockless pagecache lookup where
 * we walk the array with only the RCU lock to protect us, lock the page,
 * then check that the page hasn't moved since we looked it up.
 *
 * The caller guarantees that @xas is still valid.  If it may be in an
 * error or restart state, call xas_load() instead.
 *
 * Return: The entry at this location in the xarray.
 */
static inline void *xas_reload(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (node)
		return xa_entry(xas->xa, node, xas->xa_offset);
	/* A NULL node means the array holds a single entry at index 0. */
	return xa_head(xas->xa);
}
  735. /**
  736. * xas_set() - Set up XArray operation state for a different index.
  737. * @xas: XArray operation state.
  738. * @index: New index into the XArray.
  739. *
  740. * Move the operation state to refer to a different index. This will
  741. * have the effect of starting a walk from the top; see xas_next()
  742. * to move to an adjacent index.
  743. */
  744. static inline void xas_set(struct xa_state *xas, unsigned long index)
  745. {
  746. xas->xa_index = index;
  747. xas->xa_node = XAS_RESTART;
  748. }
  749. /**
  750. * xas_set_order() - Set up XArray operation state for a multislot entry.
  751. * @xas: XArray operation state.
  752. * @index: Target of the operation.
  753. * @order: Entry occupies 2^@order indices.
  754. */
  755. static inline void xas_set_order(struct xa_state *xas, unsigned long index,
  756. unsigned int order)
  757. {
  758. #ifdef CONFIG_XARRAY_MULTI
  759. xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
  760. xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
  761. xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
  762. xas->xa_node = XAS_RESTART;
  763. #else
  764. BUG_ON(order > 0);
  765. xas_set(xas, index);
  766. #endif
  767. }
/**
 * xas_set_update() - Set up XArray operation state for a callback.
 * @xas: XArray operation state.
 * @update: Function to call when updating a node.
 *
 * The XArray can notify a caller after it has updated an xa_node.
 * This is advanced functionality and is only needed by the page cache.
 */
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
	xas->xa_update = update;
}
#endif /* _LINUX_XARRAY_H */