#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

/**
 * idr_alloc_u32() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @nextid: Pointer to an ID.
 * @max: The maximum ID to allocate (inclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @nextid and @max.
 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 * is exclusive.  The new ID is assigned to @nextid before the pointer
 * is inserted into the IDR, so if @nextid points into the object pointed
 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.  If an error occurred,
 * @nextid is unchanged.
 */
int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
                        unsigned long max, gfp_t gfp)
{
        struct radix_tree_iter iter;
        void __rcu **slot;
        unsigned int base = idr->idr_base;
        unsigned int id = *nextid;

        if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
                idr->idr_rt.xa_flags |= IDR_RT_MARKER;

        id = (id < base) ? 0 : id - base;
        radix_tree_iter_init(&iter, id);
        slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
        if (IS_ERR(slot))
                return PTR_ERR(slot);

        *nextid = iter.index + base;
        /* there is a memory barrier inside radix_tree_iter_replace() */
        radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
        radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);

        return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_u32);
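
/*
 * Example usage (an illustrative sketch, not part of the original file;
 * "struct foo", foo_lock and foo_idr are hypothetical).  Because @nextid
 * is assigned before the pointer is published, storing the ID inside the
 * object itself is safe against concurrent RCU lookups:
 *
 *      struct foo { u32 id; };
 *      static DEFINE_SPINLOCK(foo_lock);
 *      static DEFINE_IDR(foo_idr);
 *
 *      static int foo_register(struct foo *foo)
 *      {
 *              int err;
 *
 *              foo->id = 0;
 *              spin_lock(&foo_lock);
 *              err = idr_alloc_u32(&foo_idr, foo, &foo->id, UINT_MAX,
 *                                  GFP_ATOMIC);
 *              spin_unlock(&foo_lock);
 *              return err;
 *      }
 */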

/**
 * idr_alloc() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
        u32 id = start;
        int ret;

        if (WARN_ON_ONCE(start < 0))
                return -EINVAL;

        ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
        if (ret)
                return ret;

        return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
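
/*
 * Example usage (an illustrative sketch; "foo_idr" and "foo_lock" are
 * hypothetical).  The idr_preload()/idr_preload_end() pairing is the
 * usual way to allocate with GFP_KERNEL semantics while the actual
 * insertion happens under a spinlock:
 *
 *      int id;
 *
 *      idr_preload(GFP_KERNEL);
 *      spin_lock(&foo_lock);
 *      id = idr_alloc(&foo_idr, foo, 1, 0, GFP_NOWAIT);
 *      spin_unlock(&foo_lock);
 *      idr_preload_end();
 *      if (id < 0)
 *              return id;
 */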

/**
 * idr_alloc_cyclic() - Allocate an ID cyclically.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 * The search for an unused ID will start at the last ID allocated and will
 * wrap around to @start if no free IDs are found before reaching @end.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
        u32 id = idr->idr_next;
        int err, max = end > 0 ? end - 1 : INT_MAX;

        if ((int)id < start)
                id = start;

        err = idr_alloc_u32(idr, ptr, &id, max, gfp);
        if ((err == -ENOSPC) && (id > start)) {
                id = start;
                err = idr_alloc_u32(idr, ptr, &id, max, gfp);
        }
        if (err)
                return err;

        idr->idr_next = id + 1;
        return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
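
/*
 * Example usage (an illustrative sketch; "session_idr" and session_lock
 * are hypothetical).  Cyclic allocation is useful when IDs should not be
 * reused immediately, so that a stale handle is unlikely to name a newly
 * created object:
 *
 *      spin_lock(&session_lock);
 *      id = idr_alloc_cyclic(&session_idr, session, 1, 0, GFP_ATOMIC);
 *      spin_unlock(&session_lock);
 *      if (id < 0)
 *              return id;
 */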

/**
 * idr_remove() - Remove an ID from the IDR.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Removes this ID from the IDR.  If the ID was not previously in the IDR,
 * this function returns %NULL.
 *
 * Since this function modifies the IDR, the caller should provide their
 * own locking to ensure that concurrent modification of the same IDR is
 * not possible.
 *
 * Return: The pointer formerly associated with this ID.
 */
void *idr_remove(struct idr *idr, unsigned long id)
{
        return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
}
EXPORT_SYMBOL_GPL(idr_remove);

/**
 * idr_find() - Return pointer for given ID.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Looks up the pointer associated with this ID.  A %NULL pointer may
 * indicate that @id is not allocated or that the %NULL pointer was
 * associated with this ID.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 *
 * Return: The pointer associated with this ID.
 */
void *idr_find(const struct idr *idr, unsigned long id)
{
        return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
}
EXPORT_SYMBOL_GPL(idr_find);
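
/*
 * Example usage covering idr_find() and idr_remove() (an illustrative
 * sketch; "foo_idr" and "foo_lock" are hypothetical).  Lookup may run
 * under the RCU read lock; removal needs the writer-side lock:
 *
 *      rcu_read_lock();
 *      foo = idr_find(&foo_idr, id);
 *      rcu_read_unlock();
 *
 *      spin_lock(&foo_lock);
 *      foo = idr_remove(&foo_idr, id);
 *      spin_unlock(&foo_lock);
 *      kfree(foo);     // only safe once no RCU readers can still see it,
 *                      // e.g. after synchronize_rcu() or via kfree_rcu()
 */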

/**
 * idr_for_each() - Iterate through all stored pointers.
 * @idr: IDR handle.
 * @fn: Function to be called for each pointer.
 * @data: Data passed to callback function.
 *
 * The callback function will be called for each entry in @idr, passing
 * the ID, the entry and @data.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU.  Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
                int (*fn)(int id, void *p, void *data), void *data)
{
        struct radix_tree_iter iter;
        void __rcu **slot;
        int base = idr->idr_base;

        radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
                int ret;
                unsigned long id = iter.index + base;

                if (WARN_ON_ONCE(id > INT_MAX))
                        break;
                ret = fn(id, rcu_dereference_raw(*slot), data);
                if (ret)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL(idr_for_each);
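
/*
 * Example usage (an illustrative sketch; "struct foo", foo_show() and the
 * seq_file argument are hypothetical).  The callback matches the
 * int (*fn)(int, void *, void *) signature and returns 0 to continue:
 *
 *      static int foo_show(int id, void *p, void *data)
 *      {
 *              struct seq_file *m = data;
 *              struct foo *foo = p;
 *
 *              seq_printf(m, "%d: %s\n", id, foo->name);
 *              return 0;
 *      }
 *
 *      idr_for_each(&foo_idr, foo_show, m);
 */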

/**
 * idr_get_next() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * nextid must be incremented by the user.
 */
void *idr_get_next(struct idr *idr, int *nextid)
{
        struct radix_tree_iter iter;
        void __rcu **slot;
        unsigned long base = idr->idr_base;
        unsigned long id = *nextid;

        id = (id < base) ? 0 : id - base;
        slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
        if (!slot)
                return NULL;
        id = iter.index + base;

        if (WARN_ON_ONCE(id > INT_MAX))
                return NULL;

        *nextid = id;
        return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next);
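
/*
 * Example usage (an illustrative sketch; foo_do_something() is
 * hypothetical).  This is essentially the loop that the
 * idr_for_each_entry() macro expands to; note the post-body increment
 * of the cursor, as the kernel-doc above requires:
 *
 *      int id;
 *      struct foo *foo;
 *
 *      for (id = 0; (foo = idr_get_next(&foo_idr, &id)) != NULL; id++)
 *              foo_do_something(foo);
 */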

/**
 * idr_get_next_ul() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * nextid must be incremented by the user.
 */
void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
        struct radix_tree_iter iter;
        void __rcu **slot;
        unsigned long base = idr->idr_base;
        unsigned long id = *nextid;

        id = (id < base) ? 0 : id - base;
        slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
        if (!slot)
                return NULL;

        *nextid = iter.index + base;
        return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next_ul);

/**
 * idr_replace() - replace pointer for given ID.
 * @idr: IDR handle.
 * @ptr: New pointer to associate with the ID.
 * @id: ID to change.
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Returns: the old value on success.  %-ENOENT indicates that @id was not
 * found.  %-EINVAL indicates that @ptr was not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
        struct radix_tree_node *node;
        void __rcu **slot = NULL;
        void *entry;

        id -= idr->idr_base;

        entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
        if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
                return ERR_PTR(-ENOENT);

        __radix_tree_replace(&idr->idr_rt, node, slot, ptr);

        return entry;
}
EXPORT_SYMBOL(idr_replace);
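
/*
 * Example usage (an illustrative sketch): a common pattern is to reserve
 * an ID with a NULL pointer, finish initialising the object, and only
 * then publish it with idr_replace():
 *
 *      id = idr_alloc(&foo_idr, NULL, 1, 0, GFP_KERNEL);  // reserve
 *      if (id < 0)
 *              return id;
 *      // ... finish initialising foo ...
 *      old = idr_replace(&foo_idr, foo, id);              // publish
 *      WARN_ON(old != NULL);
 */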

/**
 * DOC: IDA description
 *
 * The IDA is an ID allocator which does not provide the ability to
 * associate an ID with a pointer.  As such, it only needs to store one
 * bit per ID, and so is more space efficient than an IDR.  To use an IDA,
 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
 * then initialise it using ida_init()).  To allocate a new ID, call
 * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
 * To free an ID, call ida_free().
 *
 * ida_destroy() can be used to dispose of an IDA without needing to
 * free the individual IDs in it.  You can use ida_is_empty() to find
 * out whether the IDA has any IDs currently allocated.
 *
 * The IDA handles its own locking.  It is safe to call any of the IDA
 * functions without synchronisation in your code.
 *
 * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
 * limitation, it should be quite straightforward to raise the maximum.
 */
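
/*
 * Example usage (an illustrative sketch; "foo_ida" is hypothetical).
 * No external locking is needed:
 *
 *      static DEFINE_IDA(foo_ida);
 *
 *      int nr = ida_alloc(&foo_ida, GFP_KERNEL);
 *      if (nr < 0)
 *              return nr;
 *      // ... use nr, e.g. as a minor number or queue index ...
 *      ida_free(&foo_ida, nr);
 *
 *      // at module exit, if any IDs might remain allocated:
 *      ida_destroy(&foo_ida);
 */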

/*
 * Developer's notes:
 *
 * The IDA uses the functionality provided by the XArray to store bitmaps in
 * each entry.  The XA_FREE_MARK is only cleared when all bits in the bitmap
 * have been set.
 *
 * I considered telling the XArray that each slot is an order-10 node
 * and indexing by bit number, but the XArray can't allow a single multi-index
 * entry in the head, which would significantly increase memory consumption
 * for the IDA.  So instead we divide the index by the number of bits in the
 * leaf bitmap before doing a radix tree lookup.
 *
 * As an optimisation, if there are only a few low bits set in any given
 * leaf, instead of allocating a 128-byte bitmap, we store the bits
 * as a value entry.  Value entries never have the XA_FREE_MARK cleared
 * because we can always convert them into a bitmap entry.
 *
 * It would be possible to optimise further; once we've run out of a
 * single 128-byte bitmap, we currently switch to a 576-byte node, put
 * the 128-byte bitmap in the first entry and then start allocating extra
 * 128-byte entries.  We could instead use the 512 bytes of the node's
 * data as a bitmap before moving to that scheme.  I do not believe this
 * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
 * users of the IDA and almost none of them use more than 1024 entries.
 * Those that do use more than 1024 entries tend to need more than the
 * 8192 IDs that the 512 bytes would provide.
 *
 * The IDA always uses a lock to alloc/free.  If we add a 'test_bit'
 * equivalent, it will still need locking.  Going to RCU lookup would require
 * using RCU to free bitmaps, and that's not trivial without embedding an
 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
 * bitmap, which is excessive.
 */
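
/*
 * To illustrate the division described above (a sketch; per the constants
 * in include/linux/idr.h, IDA_BITMAP_BITS is 1024, i.e. one 128-byte
 * bitmap per XArray slot):
 *
 *      ID 0    -> xa_index 0, bit 0
 *      ID 1023 -> xa_index 0, bit 1023
 *      ID 1024 -> xa_index 1, bit 0
 *
 * i.e. xa_index = id / IDA_BITMAP_BITS and bit = id % IDA_BITMAP_BITS,
 * exactly as computed at the top of ida_alloc_range() and ida_free().
 */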

/**
 * ida_alloc_range() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and @max, inclusive.  The allocated ID will
 * not exceed %INT_MAX, even if @max is larger.
 *
 * Context: Any context.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
                        gfp_t gfp)
{
        XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
        unsigned bit = min % IDA_BITMAP_BITS;
        unsigned long flags;
        struct ida_bitmap *bitmap, *alloc = NULL;

        if ((int)min < 0)
                return -ENOSPC;

        if ((int)max < 0)
                max = INT_MAX;

retry:
        xas_lock_irqsave(&xas, flags);
next:
        bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
        if (xas.xa_index > min / IDA_BITMAP_BITS)
                bit = 0;
        if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
                goto nospc;

        if (xa_is_value(bitmap)) {
                unsigned long tmp = xa_to_value(bitmap);

                if (bit < BITS_PER_XA_VALUE) {
                        bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
                        if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
                                goto nospc;
                        if (bit < BITS_PER_XA_VALUE) {
                                tmp |= 1UL << bit;
                                xas_store(&xas, xa_mk_value(tmp));
                                goto out;
                        }
                }
                bitmap = alloc;
                if (!bitmap)
                        bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
                if (!bitmap)
                        goto alloc;
                bitmap->bitmap[0] = tmp;
                xas_store(&xas, bitmap);
                if (xas_error(&xas)) {
                        bitmap->bitmap[0] = 0;
                        goto out;
                }
        }

        if (bitmap) {
                bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
                if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
                        goto nospc;
                if (bit == IDA_BITMAP_BITS)
                        goto next;

                __set_bit(bit, bitmap->bitmap);
                if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
                        xas_clear_mark(&xas, XA_FREE_MARK);
        } else {
                if (bit < BITS_PER_XA_VALUE) {
                        bitmap = xa_mk_value(1UL << bit);
                } else {
                        bitmap = alloc;
                        if (!bitmap)
                                bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
                        if (!bitmap)
                                goto alloc;
                        __set_bit(bit, bitmap->bitmap);
                }
                xas_store(&xas, bitmap);
        }
out:
        xas_unlock_irqrestore(&xas, flags);
        if (xas_nomem(&xas, gfp)) {
                xas.xa_index = min / IDA_BITMAP_BITS;
                bit = min % IDA_BITMAP_BITS;
                goto retry;
        }
        if (bitmap != alloc)
                kfree(alloc);
        if (xas_error(&xas))
                return xas_error(&xas);
        return xas.xa_index * IDA_BITMAP_BITS + bit;
alloc:
        xas_unlock_irqrestore(&xas, flags);
        alloc = kzalloc(sizeof(*bitmap), gfp);
        if (!alloc)
                return -ENOMEM;
        xas_set(&xas, min / IDA_BITMAP_BITS);
        bit = min % IDA_BITMAP_BITS;
        goto retry;
nospc:
        xas_unlock_irqrestore(&xas, flags);
        return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
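
/*
 * Example usage (an illustrative sketch; "vlan_ida" is hypothetical):
 * restricting IDs to a fixed window, e.g. the legal VLAN ID range:
 *
 *      int vid = ida_alloc_range(&vlan_ida, 1, 4094, GFP_KERNEL);
 *      if (vid < 0)
 *              return vid;     // -ENOMEM or -ENOSPC
 */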

/**
 * ida_free() - Release an allocated ID.
 * @ida: IDA handle.
 * @id: Previously allocated ID.
 *
 * Context: Any context.
 */
void ida_free(struct ida *ida, unsigned int id)
{
        XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
        unsigned bit = id % IDA_BITMAP_BITS;
        struct ida_bitmap *bitmap;
        unsigned long flags;

        BUG_ON((int)id < 0);

        xas_lock_irqsave(&xas, flags);
        bitmap = xas_load(&xas);

        if (xa_is_value(bitmap)) {
                unsigned long v = xa_to_value(bitmap);

                if (bit >= BITS_PER_XA_VALUE)
                        goto err;
                if (!(v & (1UL << bit)))
                        goto err;
                v &= ~(1UL << bit);
                if (!v)
                        goto delete;
                xas_store(&xas, xa_mk_value(v));
        } else {
                if (!test_bit(bit, bitmap->bitmap))
                        goto err;
                __clear_bit(bit, bitmap->bitmap);
                xas_set_mark(&xas, XA_FREE_MARK);
                if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
                        kfree(bitmap);
delete:
                        xas_store(&xas, NULL);
                }
        }
        xas_unlock_irqrestore(&xas, flags);
        return;
 err:
        xas_unlock_irqrestore(&xas, flags);
        WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_free);
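
/*
 * Example usage (an illustrative sketch; "struct foo", foo_ida and the
 * minor field are hypothetical).  The ID is stored in the object at
 * allocation time and released in its destructor; freeing an ID that was
 * never allocated triggers the WARN above:
 *
 *      static void foo_destroy(struct foo *foo)
 *      {
 *              ida_free(&foo_ida, foo->minor);
 *              kfree(foo);
 *      }
 */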

/**
 * ida_destroy() - Free all IDs.
 * @ida: IDA handle.
 *
 * Calling this function frees all IDs and releases all resources used
 * by an IDA.  When this call returns, the IDA is empty and can be reused
 * or freed.  If the IDA is already empty, there is no need to call this
 * function.
 *
 * Context: Any context.
 */
void ida_destroy(struct ida *ida)
{
        XA_STATE(xas, &ida->xa, 0);
        struct ida_bitmap *bitmap;
        unsigned long flags;

        xas_lock_irqsave(&xas, flags);
        xas_for_each(&xas, bitmap, ULONG_MAX) {
                if (!xa_is_value(bitmap))
                        kfree(bitmap);
                xas_store(&xas, NULL);
        }
        xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(ida_destroy);

#ifndef __KERNEL__
extern void xa_dump_index(unsigned long index, unsigned int shift);
#define IDA_CHUNK_SHIFT         ilog2(IDA_BITMAP_BITS)

static void ida_dump_entry(void *entry, unsigned long index)
{
        unsigned long i;

        if (!entry)
                return;

        if (xa_is_node(entry)) {
                struct xa_node *node = xa_to_node(entry);
                unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
                        XA_CHUNK_SHIFT;

                xa_dump_index(index * IDA_BITMAP_BITS, shift);
                xa_dump_node(node);
                for (i = 0; i < XA_CHUNK_SIZE; i++)
                        ida_dump_entry(node->slots[i],
                                        index | (i << node->shift));
        } else if (xa_is_value(entry)) {
                xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
                pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
        } else {
                struct ida_bitmap *bitmap = entry;

                xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
                pr_cont("bitmap: %p data", bitmap);
                for (i = 0; i < IDA_BITMAP_LONGS; i++)
                        pr_cont(" %lx", bitmap->bitmap[i]);
                pr_cont("\n");
        }
}

static void ida_dump(struct ida *ida)
{
        struct xarray *xa = &ida->xa;

        pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
                                xa->xa_flags >> ROOT_TAG_SHIFT);
        ida_dump_entry(xa->xa_head, 0);
}
#endif