idr.h 7.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280
  1. /*
  2. * include/linux/idr.h
  3. *
  4. * 2002-10-18 written by Jim Houston jim.houston@ccur.com
  5. * Copyright (C) 2002 by Concurrent Computer Corporation
  6. * Distributed under the GNU GPL license version 2.
  7. *
  8. * Small id to pointer translation service avoiding fixed sized
  9. * tables.
  10. */
  11. #ifndef __IDR_H__
  12. #define __IDR_H__
  13. #include <linux/radix-tree.h>
  14. #include <linux/gfp.h>
  15. #include <linux/percpu.h>
  16. #include <linux/bug.h>
/*
 * struct idr - map from integer ids to pointers.
 * @idr_rt:   radix tree holding the id -> pointer entries; the IDR_FREE
 *            tag (see below) tracks where unused ids remain.
 * @idr_next: cursor used by idr_alloc_cyclic() — the next candidate id
 *            (see idr_get_cursor()/idr_set_cursor()).
 */
struct idr {
	struct radix_tree_root	idr_rt;
	unsigned int		idr_next;
};
/*
 * The IDR API does not expose the tagging functionality of the radix tree
 * to users.  Use tag 0 to track whether a node has free space below it.
 */
#define IDR_FREE	0

/*
 * Set the IDR flag and the IDR_FREE tag (two bits above __GFP_BITS_SHIFT
 * in the root's gfp_t word).
 */
#define IDR_RT_MARKER		((__force gfp_t)(3 << __GFP_BITS_SHIFT))

/* Static initializer for a struct idr. */
#define IDR_INIT \
{ \
	.idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \
}

/* Define and statically initialize a struct idr in one statement. */
#define DEFINE_IDR(name)	struct idr name = IDR_INIT
/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * The value returned is the value that will be next returned from
 * idr_alloc_cyclic() if it is free (otherwise the search will start from
 * this position).
 */
static inline unsigned int idr_get_cursor(const struct idr *idr)
{
	/* READ_ONCE pairs with the WRITE_ONCE in idr_set_cursor(). */
	return READ_ONCE(idr->idr_next);
}
/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * The next call to idr_alloc_cyclic() will return @val if it is free
 * (otherwise the search will start from this position).
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
	/* WRITE_ONCE pairs with the READ_ONCE in idr_get_cursor(). */
	WRITE_ONCE(idr->idr_next, val);
}
/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() is able to be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */

/* Begin a preload section; must be ended with idr_preload_end(). */
void idr_preload(gfp_t gfp_mask);

/*
 * Common allocator behind idr_alloc() and idr_alloc_ext().  On success
 * returns 0 and stores the new id in @index; a nonzero return is a
 * negative errno (see the idr_alloc() wrapper below).
 */
int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index,
		  unsigned long start, unsigned long end, gfp_t gfp,
		  bool ext);
  77. /**
  78. * idr_alloc - allocate an id
  79. * @idr: idr handle
  80. * @ptr: pointer to be associated with the new id
  81. * @start: the minimum id (inclusive)
  82. * @end: the maximum id (exclusive)
  83. * @gfp: memory allocation flags
  84. *
  85. * Allocates an unused ID in the range [start, end). Returns -ENOSPC
  86. * if there are no unused IDs in that range.
  87. *
  88. * Note that @end is treated as max when <= 0. This is to always allow
  89. * using @start + N as @end as long as N is inside integer range.
  90. *
  91. * Simultaneous modifications to the @idr are not allowed and should be
  92. * prevented by the user, usually with a lock. idr_alloc() may be called
  93. * concurrently with read-only accesses to the @idr, such as idr_find() and
  94. * idr_for_each_entry().
  95. */
  96. static inline int idr_alloc(struct idr *idr, void *ptr,
  97. int start, int end, gfp_t gfp)
  98. {
  99. unsigned long id;
  100. int ret;
  101. if (WARN_ON_ONCE(start < 0))
  102. return -EINVAL;
  103. ret = idr_alloc_cmn(idr, ptr, &id, start, end, gfp, false);
  104. if (ret)
  105. return ret;
  106. return id;
  107. }
/*
 * idr_alloc_ext - allocate an id over the full unsigned long range
 * @idr: idr handle
 * @ptr: pointer to be associated with the new id
 * @index: the allocated id is stored here on success
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive)
 * @gfp: memory allocation flags
 *
 * Like idr_alloc(), but the id is returned through @index as an
 * unsigned long; the return value is 0 on success or an error code
 * from idr_alloc_cmn().
 */
static inline int idr_alloc_ext(struct idr *idr, void *ptr,
				unsigned long *index,
				unsigned long start,
				unsigned long end,
				gfp_t gfp)
{
	return idr_alloc_cmn(idr, ptr, index, start, end, gfp, true);
}
/* Cyclic allocation, iteration, replacement and teardown; implemented
 * out of line.  The _ext variants take ids as unsigned long. */
int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
int idr_for_each(const struct idr *,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ext(struct idr *idr, unsigned long *nextid);
void *idr_replace(struct idr *, void *, int id);
void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id);
void idr_destroy(struct idr *);
/*
 * idr_remove_ext - remove an entry from the idr (unsigned long id space)
 * @idr: idr handle
 * @id: id of the entry to remove
 *
 * Returns the result of radix_tree_delete_item(): the entry that was
 * stored at @id, or NULL if there was none.
 */
static inline void *idr_remove_ext(struct idr *idr, unsigned long id)
{
	return radix_tree_delete_item(&idr->idr_rt, id, NULL);
}
/* As idr_remove_ext(), but takes the id as an int. */
static inline void *idr_remove(struct idr *idr, int id)
{
	return idr_remove_ext(idr, id);
}
  132. static inline void idr_init(struct idr *idr)
  133. {
  134. INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
  135. idr->idr_next = 0;
  136. }
  137. static inline bool idr_is_empty(const struct idr *idr)
  138. {
  139. return radix_tree_empty(&idr->idr_rt) &&
  140. radix_tree_tagged(&idr->idr_rt, IDR_FREE);
  141. }
/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	/* The matching idr_preload() left preemption disabled. */
	preempt_enable();
}
/**
 * idr_find - return pointer for given id
 * @idr: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
static inline void *idr_find_ext(const struct idr *idr, unsigned long id)
{
	return radix_tree_lookup(&idr->idr_rt, id);
}

/* As idr_find_ext(), but takes the id as an int. */
static inline void *idr_find(const struct idr *idr, int id)
{
	return idr_find_ext(idr, id);
}
/**
 * idr_for_each_entry - iterate over an idr's elements of a given type
 * @idr: idr handle
 * @entry: the type * to use as cursor
 * @id: id entry's key
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idr, entry, id) \
	for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)

/* As idr_for_each_entry(), but @id is an unsigned long. */
#define idr_for_each_entry_ext(idr, entry, id) \
	for (id = 0; ((entry) = idr_get_next_ext(idr, &(id))) != NULL; ++id)
/**
 * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
 * @idr: idr handle
 * @entry: the type * to use as cursor
 * @id: id entry's key
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define idr_for_each_entry_continue(idr, entry, id) \
	for ((entry) = idr_get_next((idr), &(id)); \
	     entry; \
	     ++id, (entry) = idr_get_next((idr), &(id)))
/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS		(IDA_BITMAP_LONGS * sizeof(long) * 8)

/* One chunk of the allocation bitmap: IDA_BITMAP_BITS ids per chunk. */
struct ida_bitmap {
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

/* Per-cpu preallocated bitmap; defined in the implementation. */
DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);

/* @ida_rt: radix tree of ida_bitmap chunks. */
struct ida {
	struct radix_tree_root	ida_rt;
};

/* Static initializer for a struct ida. */
#define IDA_INIT { \
	.ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \
}

/* Define and statically initialize a struct ida in one statement. */
#define DEFINE_IDA(name)	struct ida name = IDA_INIT
/* IDA allocation/removal primitives, implemented out of line. */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);

/* Simplified get/remove interface over the primitives above. */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);
/*
 * ida_init - runtime initialization of a struct ida
 * @ida: ida handle
 *
 * Runtime equivalent of the IDA_INIT static initializer.
 */
static inline void ida_init(struct ida *ida)
{
	INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
}
/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
 */
static inline int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
  239. static inline bool ida_is_empty(const struct ida *ida)
  240. {
  241. return radix_tree_empty(&ida->ida_rt);
  242. }
  243. #endif /* __IDR_H__ */