/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
#include <linux/preempt.h>		/* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;

/*
 * Radix tree node cache.
 */
static struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAP_SHIFT ? \
		(RADIX_TREE_MAX_PATH * 2 - 1) : (RADIX_TREE_MAX_PATH * 2 - 1))
#undef RADIX_TREE_PRELOAD_SIZE
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
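/*
 * A worked example of the sizing above (a sketch, assuming the common 64-bit
 * configuration with RADIX_TREE_MAP_SHIFT == 6): RADIX_TREE_MAX_PATH is
 * DIV_ROUND_UP(64, 6) == 11 levels, so covering the two worst-case branches
 * with a shared root needs 11 * 2 - 1 == 21 preloaded nodes.
 */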
/*
 * The IDR does not have to be as high as the radix tree since it uses
 * signed integers, not unsigned longs.
 */
#define IDR_INDEX_BITS		(8 /* CHAR_BIT */ * sizeof(int) - 1)
#define IDR_MAX_PATH		(DIV_ROUND_UP(IDR_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDR_PRELOAD_SIZE	(IDR_MAX_PATH * 2 - 1)

/*
 * The IDA is even shorter since it uses a bitmap at the last level.
 */
#define IDA_INDEX_BITS		(8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
#define IDA_MAX_PATH		(DIV_ROUND_UP(IDA_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDA_PRELOAD_SIZE	(IDA_MAX_PATH * 2 - 1)

/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
	unsigned nr;
	/* nodes->parent points to next preallocated node */
	struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };

static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}

#define RADIX_TREE_RETRY	node_to_entry(NULL)
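/*
 * A minimal sketch of the encoding used above: slots normally hold data
 * pointers, and node_to_entry() ORs in the RADIX_TREE_INTERNAL_NODE bit to
 * mark a slot that points to another radix_tree_node instead.  Assuming the
 * matching radix-tree.h value of 1UL for that bit, a hypothetical node at
 * address 0x1000 is stored as the entry 0x1001; radix_tree_is_internal_node()
 * sees the low bit set and entry_to_node() masks it back off to recover
 * 0x1000.  RADIX_TREE_RETRY is the same encoding applied to NULL, which no
 * real node can alias.
 */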
#ifdef CONFIG_RADIX_TREE_MULTIORDER
/* Sibling slots point directly to another slot in the same node */
static inline
bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
{
	void __rcu **ptr = node;
	return (parent->slots <= ptr) &&
			(ptr < parent->slots + RADIX_TREE_MAP_SIZE);
}
#else
static inline
bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
{
	return false;
}
#endif

static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{
	return slot - parent->slots;
}

static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	if (radix_tree_is_internal_node(entry)) {
		if (is_sibling_entry(parent, entry)) {
			void __rcu **sibentry;
			sibentry = (void __rcu **) entry_to_node(entry);
			offset = get_slot_offset(parent, sibentry);
			entry = rcu_dereference_raw(*sibentry);
		}
	}
#endif

	*nodep = (void *)entry;
	return offset;
}

static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
	return root->gfp_mask & __GFP_BITS_MASK;
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}

static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
	root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1;
}

static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
	return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT));
}

static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
	return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT;
}

static inline bool is_idr(const struct radix_tree_root *root)
{
	return !!(root->gfp_mask & ROOT_IS_IDR);
}

/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(const struct radix_tree_node *node,
							unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}

static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}

/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @node: the node whose tag bitmap to search
 * @tag: the tag index
 * @offset: the bitnumber to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or size if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
			 unsigned long offset)
{
	const unsigned long *addr = node->tags[tag];

	if (offset < RADIX_TREE_MAP_SIZE) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < RADIX_TREE_MAP_SIZE) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return RADIX_TREE_MAP_SIZE;
}
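/*
 * A small worked example (hypothetical values): if node->tags[tag][0] is
 * 0x9 (bits 0 and 3 set) and offset is 1, then tmp = 0x9 >> 1 = 0x4,
 * __ffs(0x4) = 2, and the function returns 2 + 1 = 3, the first set bit
 * at or after the starting offset.
 */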
static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
	return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
}

/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}

static unsigned long next_index(unsigned long index,
				const struct radix_tree_node *node,
				unsigned long offset)
{
	return (index & ~node_maxindex(node)) + (offset << node->shift);
}

#ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
	unsigned long i;

	pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
		node, node->offset, index, index | node_maxindex(node),
		node->parent,
		node->tags[0][0], node->tags[1][0], node->tags[2][0],
		node->shift, node->count, node->exceptional);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		unsigned long first = index | (i << node->shift);
		unsigned long last = first | ((1UL << node->shift) - 1);
		void *entry = node->slots[i];
		if (!entry)
			continue;
		if (entry == RADIX_TREE_RETRY) {
			pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
					i, first, last, node);
		} else if (!radix_tree_is_internal_node(entry)) {
			pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
					entry, i, first, last, node);
		} else if (is_sibling_entry(node, entry)) {
			pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
					entry, i, first, last, node,
					*(void **)entry_to_node(entry));
		} else {
			dump_node(entry_to_node(entry), first);
		}
	}
}

/* For debug */
static void radix_tree_dump(struct radix_tree_root *root)
{
	pr_debug("radix root: %p rnode %p tags %x\n",
			root, root->rnode,
			root->gfp_mask >> ROOT_TAG_SHIFT);
	if (!radix_tree_is_internal_node(root->rnode))
		return;
	dump_node(entry_to_node(root->rnode), 0);
}

static void dump_ida_node(void *entry, unsigned long index)
{
	unsigned long i;

	if (!entry)
		return;

	if (radix_tree_is_internal_node(entry)) {
		struct radix_tree_node *node = entry_to_node(entry);

		pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
			node, node->offset, index * IDA_BITMAP_BITS,
			((index | node_maxindex(node)) + 1) *
				IDA_BITMAP_BITS - 1,
			node->parent, node->tags[0][0], node->shift,
			node->count);
		for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
			dump_ida_node(node->slots[i],
					index | (i << node->shift));
	} else if (radix_tree_exceptional_entry(entry)) {
		pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
				entry, (int)(index & RADIX_TREE_MAP_MASK),
				index * IDA_BITMAP_BITS,
				index * IDA_BITMAP_BITS + BITS_PER_LONG -
					RADIX_TREE_EXCEPTIONAL_SHIFT,
				(unsigned long)entry >>
					RADIX_TREE_EXCEPTIONAL_SHIFT);
	} else {
		struct ida_bitmap *bitmap = entry;

		pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
				(int)(index & RADIX_TREE_MAP_MASK),
				index * IDA_BITMAP_BITS,
				(index + 1) * IDA_BITMAP_BITS - 1);
		for (i = 0; i < IDA_BITMAP_LONGS; i++)
			pr_cont(" %lx", bitmap->bitmap[i]);
		pr_cont("\n");
	}
}

static void ida_dump(struct ida *ida)
{
	struct radix_tree_root *root = &ida->ida_rt;
	pr_debug("ida: %p node %p free %d\n", ida, root->rnode,
				root->gfp_mask >> ROOT_TAG_SHIFT);
	dump_ida_node(root->rnode, 0);
}
#endif

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
			struct radix_tree_root *root,
			unsigned int shift, unsigned int offset,
			unsigned int count, unsigned int exceptional)
{
	struct radix_tree_node *ret = NULL;

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->parent;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	if (ret) {
		ret->shift = shift;
		ret->offset = offset;
		ret->count = count;
		ret->exceptional = exceptional;
		ret->parent = parent;
		ret->root = root;
	}
	return ret;
}

static void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);

	/*
	 * Must only free zeroed nodes into the slab.  We can be left with
	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
	 * and tags here.
	 */
	memset(node->slots, 0, sizeof(node->slots));
	memset(node->tags, 0, sizeof(node->tags));
	INIT_LIST_HEAD(&node->private_list);

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		preempt_enable();
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		preempt_disable();
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->parent = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);
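/*
 * Example usage (a minimal sketch, not part of this file): preloading pairs
 * with radix_tree_preload_end(), which re-enables preemption, around the
 * locked insertion.  The tree, lock and item names below are hypothetical,
 * and the tree is assumed to have been initialised without
 * __GFP_DIRECT_RECLAIM, per the comment above.
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&mylock);
 *	error = radix_tree_insert(&mytree, index, item);
 *	spin_unlock(&mylock);
 *	radix_tree_preload_end();
 */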
/*
 * The same as radix_tree_preload(), except we don't guarantee preloading
 * happens.  We do it if we decide it helps.  On success, return zero with
 * preemption disabled.  On error, return -ENOMEM with preemption not
 * disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	preempt_disable();
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/*
 * Preload with enough objects to ensure that we can split a single entry
 * of order @old_order into many entries of size @new_order
 */
int radix_tree_split_preload(unsigned int old_order, unsigned int new_order,
				gfp_t gfp_mask)
{
	unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT);
	unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) -
				(new_order / RADIX_TREE_MAP_SHIFT);
	unsigned nr = 0;

	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	BUG_ON(new_order >= old_order);

	while (layers--)
		nr = nr * RADIX_TREE_MAP_SIZE + 1;
	return __radix_tree_preload(gfp_mask, top * nr);
}
#endif

/*
 * The same as radix_tree_maybe_preload(), but preload the number of nodes
 * required to insert (1 << order) contiguous naturally-aligned elements.
 */
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
{
	unsigned long nr_subtrees;
	int nr_nodes, subtree_height;

	/* Preloading doesn't help anything with this gfp mask, skip it */
	if (!gfpflags_allow_blocking(gfp_mask)) {
		preempt_disable();
		return 0;
	}

	/*
	 * Calculate number and height of fully populated subtrees it takes to
	 * store (1 << order) elements.
	 */
	nr_subtrees = 1 << order;
	for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE;
			subtree_height++)
		nr_subtrees >>= RADIX_TREE_MAP_SHIFT;

	/*
	 * The worst case is zero height tree with a single item at index 0 and
	 * then inserting items starting at ULONG_MAX - (1 << order).
	 *
	 * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to
	 * 0-index item.
	 */
	nr_nodes = RADIX_TREE_MAX_PATH;

	/* Plus branch to fully populated subtrees. */
	nr_nodes += RADIX_TREE_MAX_PATH - subtree_height;

	/* Root node is shared. */
	nr_nodes--;

	/* Plus nodes required to build subtrees. */
	nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height];

	return __radix_tree_preload(gfp_mask, nr_nodes);
}

static unsigned radix_tree_load_root(const struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->rnode);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}

/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
				unsigned long index, unsigned int shift)
{
	void *entry;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be. */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	entry = rcu_dereference_raw(root->rnode);
	if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
							root, shift, 0, 1, 0);
		if (!node)
			return -ENOMEM;

		if (is_idr(root)) {
			all_tag_set(node, IDR_FREE);
			if (!root_tag_get(root, IDR_FREE)) {
				tag_clear(node, IDR_FREE, 0);
				root_tag_set(root, IDR_FREE);
			}
		} else {
			/* Propagate the aggregated tag info to the new child */
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
				if (root_tag_get(root, tag))
					tag_set(node, tag, 0);
			}
		}

		BUG_ON(shift > BITS_PER_LONG);
		if (radix_tree_is_internal_node(entry)) {
			entry_to_node(entry)->parent = node;
		} else if (radix_tree_exceptional_entry(entry)) {
			/* Moving an exceptional root->rnode to a node */
			node->exceptional = 1;
		}
		/*
		 * entry was already in the radix tree, so we do not need
		 * rcu_assign_pointer here
		 */
		node->slots[0] = (void __rcu *)entry;
		entry = node_to_entry(node);
		rcu_assign_pointer(root->rnode, entry);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}

/**
 * radix_tree_shrink - shrink radix tree to minimum height
 * @root: radix tree root
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root,
				     radix_tree_update_node_t update_node)
{
	bool shrunk = false;

	for (;;) {
		struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its child
		 * is not at the leftmost slot, or the child is a multiorder
		 * entry, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = rcu_dereference_raw(node->slots[0]);
		if (!child)
			break;
		if (!radix_tree_is_internal_node(child) && node->shift)
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->rnode) as far as dependent read barriers go.
		 */
		root->rnode = (void __rcu *)child;
		if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
			root_tag_clear(root, IDR_FREE);

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If the item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing direct root node with an indirect pointer
		 * also results in a stale slot). So tag the slot as indirect
		 * to force callers to retry.
		 */
		node->count = 0;
		if (!radix_tree_is_internal_node(child)) {
			node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
			if (update_node)
				update_node(node);
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		shrunk = true;
	}

	return shrunk;
}

static bool delete_node(struct radix_tree_root *root,
			struct radix_tree_node *node,
			radix_tree_update_node_t update_node)
{
	bool deleted = false;

	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node_to_entry(node) ==
					rcu_dereference_raw(root->rnode))
				deleted |= radix_tree_shrink(root,
								update_node);
			return deleted;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			/*
			 * Shouldn't the tags already have all been cleared
			 * by the caller?
			 */
			if (!is_idr(root))
				root_tag_clear_all(root);
			root->rnode = NULL;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		deleted = true;

		node = parent;
	} while (node);

	return deleted;
}

/**
 * __radix_tree_create - create a slot in a radix tree
 * @root: radix tree root
 * @index: index key
 * @order: index occupies 2^order aligned slots
 * @nodep: returns node
 * @slotp: returns slot
 *
 * Create, if necessary, and return the node and slot for an item
 * at position @index in the radix tree @root.
 *
 * Until there is more than one item in the tree, no nodes are
 * allocated and @root->rnode is used as a direct slot instead of
 * pointing to a node, in which case *@nodep will be NULL.
 *
 * Returns -ENOMEM, or 0 for success.
 */
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void __rcu ***slotp)
{
	struct radix_tree_node *node = NULL, *child;
	void __rcu **slot = (void __rcu **)&root->rnode;
	unsigned long maxindex;
	unsigned int shift, offset = 0;
	unsigned long max = index | ((1UL << order) - 1);
	gfp_t gfp = root_gfp_mask(root);

	shift = radix_tree_load_root(root, &child, &maxindex);

	/* Make sure the tree is high enough. */
	if (order > 0 && max == ((1UL << order) - 1))
		max++;
	if (max > maxindex) {
		int error = radix_tree_extend(root, gfp, max, shift);
		if (error < 0)
			return error;
		shift = error;
		child = rcu_dereference_raw(root->rnode);
	}

	while (shift > order) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node. */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return -ENOMEM;
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		/* Go a level down */
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);
		slot = &node->slots[offset];
	}

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = slot;
	return 0;
}

/*
 * Free any nodes below this node.  The tree is presumed to not need
 * shrinking, and any user data in the tree is presumed to not need a
 * destructor called on it.  If we need to add a destructor, we can
 * add that functionality later.  Note that we may not clear tags or
 * slots from the tree as an RCU walker may still have a pointer into
 * this subtree.  We could replace the entries with RADIX_TREE_RETRY,
 * but we'll still have to clear those in rcu_free.
 */
static void radix_tree_free_nodes(struct radix_tree_node *node)
{
	unsigned offset = 0;
	struct radix_tree_node *child = entry_to_node(node);

	for (;;) {
		void *entry = rcu_dereference_raw(child->slots[offset]);
		if (radix_tree_is_internal_node(entry) &&
					!is_sibling_entry(child, entry)) {
			child = entry_to_node(entry);
			offset = 0;
			continue;
		}
		offset++;
		while (offset == RADIX_TREE_MAP_SIZE) {
			struct radix_tree_node *old = child;
			offset = child->offset + 1;
			child = child->parent;
			WARN_ON_ONCE(!list_empty(&old->private_list));
			radix_tree_node_free(old);
			if (old == entry_to_node(node))
				return;
		}
	}
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
static inline int insert_entries(struct radix_tree_node *node,
		void __rcu **slot, void *item, unsigned order, bool replace)
{
	struct radix_tree_node *child;
	unsigned i, n, tag, offset, tags = 0;

	if (node) {
		if (order > node->shift)
			n = 1 << (order - node->shift);
		else
			n = 1;
		offset = get_slot_offset(node, slot);
	} else {
		n = 1;
		offset = 0;
	}

	if (n > 1) {
		offset = offset & ~(n - 1);
		slot = &node->slots[offset];
	}
	child = node_to_entry(slot);

	for (i = 0; i < n; i++) {
		if (slot[i]) {
			if (replace) {
				node->count--;
				for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
					if (tag_get(node, tag, offset + i))
						tags |= 1 << tag;
			} else
				return -EEXIST;
		}
	}

	for (i = 0; i < n; i++) {
		struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
		if (i) {
			rcu_assign_pointer(slot[i], child);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_clear(node, tag, offset + i);
		} else {
			rcu_assign_pointer(slot[i], item);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_set(node, tag, offset);
		}
		if (radix_tree_is_internal_node(old) &&
					!is_sibling_entry(node, old) &&
					(old != RADIX_TREE_RETRY))
			radix_tree_free_nodes(old);
		if (radix_tree_exceptional_entry(old))
			node->exceptional--;
	}
	if (node) {
		node->count += n;
		if (radix_tree_exceptional_entry(item))
			node->exceptional += n;
	}
	return n;
}
#else
static inline int insert_entries(struct radix_tree_node *node,
		void __rcu **slot, void *item, unsigned order, bool replace)
{
	if (*slot)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);
	if (node) {
		node->count++;
		if (radix_tree_exceptional_entry(item))
			node->exceptional++;
	}
	return 1;
}
#endif

/**
 * __radix_tree_insert - insert into a radix tree
 * @root: radix tree root
 * @index: index key
 * @order: key covers the 2^order indices around index
 * @item: item to insert
 *
 * Insert an item into the radix tree at position @index.
 */
int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void __rcu **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (error)
		return error;

	error = insert_entries(node, slot, item, order, false);
	if (error < 0)
		return error;

	if (node) {
		unsigned offset = get_slot_offset(node, slot);
		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(__radix_tree_insert);
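/*
 * Example usage (a minimal sketch): radix_tree_insert(), defined in
 * <linux/radix-tree.h>, is the common order-0 wrapper around this function.
 * The tree and item names here are hypothetical.
 *
 *	INIT_RADIX_TREE(&mytree, GFP_KERNEL);
 *	error = radix_tree_insert(&mytree, 42UL, item);
 *	if (error == -EEXIST)
 *		; // index 42 was already occupied
 */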
/**
 * __radix_tree_lookup - lookup an item in a radix tree
 * @root: radix tree root
 * @index: index key
 * @nodep: returns node
 * @slotp: returns slot
 *
 * Lookup and return the item at position @index in the radix
 * tree @root.
 *
 * Until there is more than one item in the tree, no nodes are
 * allocated and @root->rnode is used as a direct slot instead of
 * pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(const struct radix_tree_root *root,
			  unsigned long index, struct radix_tree_node **nodep,
			  void __rcu ***slotp)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	void __rcu **slot;

 restart:
	parent = NULL;
	slot = (void __rcu **)&root->rnode;
	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		if (node == RADIX_TREE_RETRY)
			goto restart;
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		slot = parent->slots + offset;
	}

	if (nodep)
		*nodep = parent;
	if (slotp)
		*slotp = slot;
	return node;
}

/**
 * radix_tree_lookup_slot - lookup a slot in a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Returns: the slot corresponding to the position @index in the
 * radix tree @root.  This is useful for update-if-exists operations.
 *
 * This function can be called under rcu_read_lock iff the slot is not
 * modified by radix_tree_replace_slot, otherwise it must be called
 * exclusive from other writers.  Any dereference of the slot must be done
 * using radix_tree_deref_slot.
 */
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
				unsigned long index)
{
	void __rcu **slot;

	if (!__radix_tree_lookup(root, index, NULL, &slot))
		return NULL;
	return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);
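/*
 * Example usage (a minimal sketch): an RCU reader dereferences the slot with
 * radix_tree_deref_slot() and retries on RADIX_TREE_RETRY via
 * radix_tree_deref_retry(), per the rule in the kernel-doc above.  The tree
 * name is hypothetical.
 *
 *	rcu_read_lock();
 * again:
 *	slot = radix_tree_lookup_slot(&mytree, index);
 *	if (slot) {
 *		item = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(item))
 *			goto again;
 *	}
 *	rcu_read_unlock();
 */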
/**
 * radix_tree_lookup - perform lookup operation on a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Lookup the item at the position @index in the radix tree @root.
 *
 * This function can be called under rcu_read_lock, however the caller
 * must manage lifetimes of leaf nodes (e.g. RCU may also be used to free
 * them safely).  No RCU barriers are required to access or modify the
 * returned item, however.
 */
void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
{
	return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);

static inline void replace_sibling_entries(struct radix_tree_node *node,
				void __rcu **slot, int count, int exceptional)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	void *ptr = node_to_entry(slot);
	unsigned offset = get_slot_offset(node, slot) + 1;

	while (offset < RADIX_TREE_MAP_SIZE) {
		if (rcu_dereference_raw(node->slots[offset]) != ptr)
			break;
		if (count < 0) {
			node->slots[offset] = NULL;
			node->count--;
		}
		node->exceptional += exceptional;
		offset++;
	}
#endif
}

static void replace_slot(void __rcu **slot, void *item,
		struct radix_tree_node *node, int count, int exceptional)
{
	if (WARN_ON_ONCE(radix_tree_is_internal_node(item)))
		return;

	if (node && (count || exceptional)) {
		node->count += count;
		node->exceptional += exceptional;
		replace_sibling_entries(node, slot, count, exceptional);
	}

	rcu_assign_pointer(*slot, item);
}

static bool node_tag_get(const struct radix_tree_root *root,
				const struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	if (node)
		return tag_get(node, tag, offset);
	return root_tag_get(root, tag);
}

/*
 * IDR users want to be able to store NULL in the tree, so if the slot isn't
 * free, don't adjust the count, even if it's transitioning between NULL and
 * non-NULL.  For the IDA, we mark slots as being IDR_FREE while they still
 * have empty bits, but it only stores NULL in slots when they're being
 * deleted.
 */
static int calculate_count(struct radix_tree_root *root,
				struct radix_tree_node *node, void __rcu **slot,
				void *item, void *old)
{
	if (is_idr(root)) {
		unsigned offset = get_slot_offset(node, slot);
		bool free = node_tag_get(root, node, IDR_FREE, offset);
		if (!free)
			return 0;
		if (!old)
			return 1;
	}
	return !!item - !!old;
}

/**
 * __radix_tree_replace - replace item in a slot
 * @root: radix tree root
 * @node: pointer to tree node
 * @slot: pointer to slot in @node
 * @item: new item to store in the slot.
 * @update_node: callback for changing leaf nodes
 *
 * For use with __radix_tree_lookup().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
void __radix_tree_replace(struct radix_tree_root *root,
			  struct radix_tree_node *node,
			  void __rcu **slot, void *item,
			  radix_tree_update_node_t update_node)
{
	void *old = rcu_dereference_raw(*slot);
	int exceptional = !!radix_tree_exceptional_entry(item) -
				!!radix_tree_exceptional_entry(old);
	int count = calculate_count(root, node, slot, item, old);

	/*
	 * This function supports replacing exceptional entries and
	 * deleting entries, but that needs accounting against the
	 * node unless the slot is root->rnode.
	 */
	WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->rnode) &&
			(count || exceptional));
	replace_slot(slot, item, node, count, exceptional);

	if (!node)
		return;

	if (update_node)
		update_node(node);

	delete_node(root, node, update_node);
}

/**
 * radix_tree_replace_slot - replace item in a slot
 * @root: radix tree root
 * @slot: pointer to slot
 * @item: new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(),
 * radix_tree_gang_lookup_tag_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 *
 * NOTE: This cannot be used to switch between non-entries (empty slots),
 * regular entries, and exceptional entries, as that requires accounting
 * inside the radix tree node.  When switching from one type of entry or
 * deleting, use __radix_tree_lookup() and __radix_tree_replace() or
 * radix_tree_iter_replace().
 */
void radix_tree_replace_slot(struct radix_tree_root *root,
			     void __rcu **slot, void *item)
{
	__radix_tree_replace(root, NULL, slot, item, NULL);
}
EXPORT_SYMBOL(radix_tree_replace_slot);

/**
 * radix_tree_iter_replace - replace item in a slot
 * @root: radix tree root
 * @iter: iterator state
 * @slot: pointer to slot
 * @item: new item to store in the slot.
 *
 * For use with radix_tree_split() and radix_tree_for_each_slot().
 * Caller must hold tree write locked across split and replacement.
 */
void radix_tree_iter_replace(struct radix_tree_root *root,
				const struct radix_tree_iter *iter,
				void __rcu **slot, void *item)
{
	__radix_tree_replace(root, iter->node, slot, item, NULL);
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/**
 * radix_tree_join - replace multiple entries with one multiorder entry
 * @root: radix tree root
 * @index: an index inside the new entry
 * @order: order of the new entry
 * @item: new entry
 *
 * Call this function to replace several entries with one larger entry.
 * The existing entries are presumed to not need freeing as a result of
 * this call.
 *
 * The replacement entry will have all the tags set on it that were set
 * on any of the entries it is replacing.
 */
int radix_tree_join(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void __rcu **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (!error)
		error = insert_entries(node, slot, item, order, true);
	if (error > 0)
		error = 0;

	return error;
}

/**
 * radix_tree_split - Split an entry into smaller entries
 * @root: radix tree root
 * @index: An index within the large entry
 * @order: Order of new entries
 *
 * Call this function as the first step in replacing a multiorder entry
 * with several entries of lower order.  After this function returns,
 * loop over the relevant portion of the tree using radix_tree_for_each_slot()
 * and call radix_tree_iter_replace() to set up each new entry.
 *
 * The tags from this entry are replicated to all the new entries.
 *
 * The radix tree should be locked against modification during the entire
 * replacement operation.  Lock-free lookups will see RADIX_TREE_RETRY which
 * should prompt RCU walkers to restart the lookup from the root.
 */
int radix_tree_split(struct radix_tree_root *root, unsigned long index,
				unsigned order)
{
	struct radix_tree_node *parent, *node, *child;
	void __rcu **slot;
	unsigned int offset, end;
	unsigned n, tag, tags = 0;
	gfp_t gfp = root_gfp_mask(root);

	if (!__radix_tree_lookup(root, index, &parent, &slot))
		return -ENOENT;
	if (!parent)
		return -ENOENT;

	offset = get_slot_offset(parent, slot);

	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
		if (tag_get(parent, tag, offset))
			tags |= 1 << tag;

	for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
		if (!is_sibling_entry(parent,
				rcu_dereference_raw(parent->slots[end])))
			break;
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			if (tags & (1 << tag))
				tag_set(parent, tag, end);
		/* rcu_assign_pointer ensures tags are set before RETRY */
		rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY);
	}
	rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY);
	parent->exceptional -= (end - offset);

	if (order == parent->shift)
		return 0;
	if (order > parent->shift) {
		while (offset < end)
			offset += insert_entries(parent, &parent->slots[offset],
					RADIX_TREE_RETRY, order, true);
		return 0;
	}

	node = parent;

	for (;;) {
		if (node->shift > order) {
			child = radix_tree_node_alloc(gfp, node, root,
					node->shift - RADIX_TREE_MAP_SHIFT,
					offset, 0, 0);
			if (!child)
				goto nomem;
			if (node != parent) {
				node->count++;
				rcu_assign_pointer(node->slots[offset],
							node_to_entry(child));
				for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
					if (tags & (1 << tag))
						tag_set(node, tag, offset);
			}

			node = child;
			offset = 0;
			continue;
		}

		n = insert_entries(node, &node->slots[offset],
					RADIX_TREE_RETRY, order, false);
		BUG_ON(n > RADIX_TREE_MAP_SIZE);

		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			if (tags & (1 << tag))
				tag_set(node, tag, offset);
		offset += n;

		while (offset == RADIX_TREE_MAP_SIZE) {
			if (node == parent)
				break;
			offset = node->offset;
			child = node;
			node = node->parent;
			rcu_assign_pointer(node->slots[offset],
					node_to_entry(child));
			offset++;
		}
		if ((node == parent) && (offset == end))
			return 0;
	}

 nomem:
	/* Shouldn't happen; did user forget to preload? */
	/* TODO: free all the allocated nodes */
	WARN_ON(1);
	return -ENOMEM;
}
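/*
 * Example usage (a minimal sketch, following the procedure in the kernel-doc
 * above): split a multiorder entry at @index into order-0 entries and fill
 * them in.  The tree, iterator and new_item() names are hypothetical, and a
 * real caller would bound the loop to the old entry's index range.
 *
 *	error = radix_tree_split(&mytree, index, 0);
 *	if (!error) {
 *		radix_tree_for_each_slot(slot, &mytree, &iter, index) {
 *			radix_tree_iter_replace(&mytree, &iter, slot,
 *						new_item(iter.index));
 *		}
 *	}
 */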
#endif

static void node_tag_set(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (tag_get(node, tag, offset))
			return;
		tag_set(node, tag, offset);
		offset = node->offset;
		node = node->parent;
	}

	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);
}

/**
 * radix_tree_tag_set - set a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index
 *
 * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 * corresponding to @index in the radix tree, from the root all
 * the way down to the leaf node.
 *
 * Returns the address of the tagged item.  Setting a tag on a not-present
 * item is a bug.
 */
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	radix_tree_load_root(root, &node, &maxindex);
	BUG_ON(index > maxindex);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		BUG_ON(!node);

		if (!tag_get(parent, tag, offset))
			tag_set(parent, tag, offset);
	}

	/* set the root's tag bit */
	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);
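/*
 * Example usage (a minimal sketch): tag an existing entry and test the tag
 * later.  The tree name and the bare tag index 0 are hypothetical; real
 * callers such as the page cache define symbolic tag names instead.
 *
 *	radix_tree_tag_set(&mytree, index, 0);
 *	...
 *	if (radix_tree_tag_get(&mytree, index, 0))
 *		; // the entry at index is tagged
 */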
  1287. /**
  1288. * radix_tree_iter_tag_set - set a tag on the current iterator entry
  1289. * @root: radix tree root
  1290. * @iter: iterator state
  1291. * @tag: tag to set
  1292. */
  1293. void radix_tree_iter_tag_set(struct radix_tree_root *root,
  1294. const struct radix_tree_iter *iter, unsigned int tag)
  1295. {
  1296. node_tag_set(root, iter->node, tag, iter_offset(iter));
  1297. }
  1298. static void node_tag_clear(struct radix_tree_root *root,
  1299. struct radix_tree_node *node,
  1300. unsigned int tag, unsigned int offset)
  1301. {
  1302. while (node) {
  1303. if (!tag_get(node, tag, offset))
  1304. return;
  1305. tag_clear(node, tag, offset);
  1306. if (any_tag_set(node, tag))
  1307. return;
  1308. offset = node->offset;
  1309. node = node->parent;
  1310. }
  1311. /* clear the root's tag bit */
  1312. if (root_tag_get(root, tag))
  1313. root_tag_clear(root, tag);
  1314. }
/**
 * radix_tree_tag_clear - clear a tag on a radix tree node
 * @root:	radix tree root
 * @index:	index key
 * @tag:	tag index
 *
 * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
 * corresponding to @index in the radix tree.  If this causes
 * the leaf node to have no tags set then clear the tag in the
 * next-to-leaf node, etc.
 *
 * Returns the address of the tagged item on success, else NULL, i.e. it
 * has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	int uninitialized_var(offset);

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	parent = NULL;

	while (radix_tree_is_internal_node(node)) {
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
	}

	if (node)
		node_tag_clear(root, parent, tag, offset);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);
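/*
 * Usage sketch (illustrative): clear the caller-defined tag once the
 * entry has been processed; as with radix_tree_lookup(), a NULL return
 * means no entry was present at @index:
 *
 *	spin_lock_irq(&tree_lock);
 *	item = radix_tree_tag_clear(&tree, index, MY_TAG);
 *	spin_unlock_irq(&tree_lock);
 */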
/**
 * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
 * @root:	radix tree root
 * @iter:	iterator state
 * @tag:	tag to clear
 */
void radix_tree_iter_tag_clear(struct radix_tree_root *root,
			const struct radix_tree_iter *iter, unsigned int tag)
{
	node_tag_clear(root, iter->node, tag, iter_offset(iter));
}
/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root:	radix tree root
 * @index:	index key
 * @tag:	tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(const struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	if (!root_tag_get(root, tag))
		return 0;

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return 0;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);

		if (!tag_get(parent, tag, offset))
			return 0;
		if (node == RADIX_TREE_RETRY)
			break;
	}

	return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);
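/*
 * Usage sketch (illustrative): an advisory check under RCU.  Per the
 * note above, the answer may already be stale by the time it is
 * returned unless taggers and deleters are otherwise excluded:
 *
 *	rcu_read_lock();
 *	tagged = radix_tree_tag_get(&tree, index, MY_TAG);
 *	rcu_read_unlock();
 */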
static inline void __set_iter_shift(struct radix_tree_iter *iter,
					unsigned int shift)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	iter->shift = shift;
#endif
}

/* Construct iter->tags bit-mask from node->tags[tag] array */
static void set_iter_tags(struct radix_tree_iter *iter,
				struct radix_tree_node *node, unsigned offset,
				unsigned tag)
{
	unsigned tag_long = offset / BITS_PER_LONG;
	unsigned tag_bit  = offset % BITS_PER_LONG;

	if (!node) {
		iter->tags = 1;
		return;
	}

	iter->tags = node->tags[tag][tag_long] >> tag_bit;

	/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
	if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
		/* Pick tags from next element */
		if (tag_bit)
			iter->tags |= node->tags[tag][tag_long + 1] <<
						(BITS_PER_LONG - tag_bit);
		/* Clip chunk size, here only BITS_PER_LONG tags */
		iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
	}
}
#ifdef CONFIG_RADIX_TREE_MULTIORDER
static void __rcu **skip_siblings(struct radix_tree_node **nodep,
			void __rcu **slot, struct radix_tree_iter *iter)
{
	void *sib = node_to_entry(slot - 1);

	while (iter->index < iter->next_index) {
		*nodep = rcu_dereference_raw(*slot);
		if (*nodep && *nodep != sib)
			return slot;
		slot++;
		iter->index = __radix_tree_iter_add(iter, 1);
		iter->tags >>= 1;
	}

	*nodep = NULL;
	return NULL;
}

void __rcu **__radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node = rcu_dereference_raw(*slot);

	slot = skip_siblings(&node, slot, iter);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;
		unsigned long next_index;

		if (node == RADIX_TREE_RETRY)
			return slot;
		node = entry_to_node(node);
		iter->node = node;
		iter->shift = node->shift;

		if (flags & RADIX_TREE_ITER_TAGGED) {
			offset = radix_tree_find_next_bit(node, tag, 0);
			if (offset == RADIX_TREE_MAP_SIZE)
				return NULL;
			slot = &node->slots[offset];
			iter->index = __radix_tree_iter_add(iter, offset);
			set_iter_tags(iter, node, offset, tag);
			node = rcu_dereference_raw(*slot);
		} else {
			offset = 0;
			slot = &node->slots[0];
			for (;;) {
				node = rcu_dereference_raw(*slot);
				if (node)
					break;
				slot++;
				offset++;
				if (offset == RADIX_TREE_MAP_SIZE)
					return NULL;
			}
			iter->index = __radix_tree_iter_add(iter, offset);
		}
		if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0))
			goto none;
		next_index = (iter->index | shift_maxindex(iter->shift)) + 1;
		if (next_index < iter->next_index)
			iter->next_index = next_index;
	}

	return slot;
 none:
	iter->next_index = 0;
	return NULL;
}
EXPORT_SYMBOL(__radix_tree_next_slot);
#else
static void __rcu **skip_siblings(struct radix_tree_node **nodep,
			void __rcu **slot, struct radix_tree_iter *iter)
{
	return slot;
}
#endif
void __rcu **radix_tree_iter_resume(void __rcu **slot,
					struct radix_tree_iter *iter)
{
	struct radix_tree_node *node;

	slot++;
	iter->index = __radix_tree_iter_add(iter, 1);
	skip_siblings(&node, slot, iter);
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}
EXPORT_SYMBOL(radix_tree_iter_resume);
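/*
 * Usage sketch (illustrative): give up the lock in the middle of a long
 * walk and continue safely from the next index, with a caller-defined
 * tree_lock:
 *
 *	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
 *		... process *slot ...
 *		if (need_resched()) {
 *			slot = radix_tree_iter_resume(slot, &iter);
 *			cond_resched_lock(&tree_lock);
 *		}
 *	}
 */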
/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if iteration is over
 */
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node, *child;
	unsigned long index, offset, maxindex;

	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
		return NULL;

	/*
	 * Catch next_index overflow after ~0UL.  iter->index never overflows
	 * during iterating; it can be zero only at the beginning.
	 * And we cannot overflow iter->next_index in a single step,
	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
	 *
	 * This condition is also used by radix_tree_next_slot() to stop
	 * contiguous iterating, and to forbid switching to the next chunk.
	 */
	index = iter->next_index;
	if (!index && iter->index)
		return NULL;

 restart:
	radix_tree_load_root(root, &child, &maxindex);
	if (index > maxindex)
		return NULL;
	if (!child)
		return NULL;

	if (!radix_tree_is_internal_node(child)) {
		/* Single-slot tree */
		iter->index = index;
		iter->next_index = maxindex + 1;
		iter->tags = 1;
		iter->node = NULL;
		__set_iter_shift(iter, 0);
		return (void __rcu **)&root->rnode;
	}

	do {
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);

		if ((flags & RADIX_TREE_ITER_TAGGED) ?
				!tag_get(node, tag, offset) : !child) {
			/* Hole detected */
			if (flags & RADIX_TREE_ITER_CONTIG)
				return NULL;

			if (flags & RADIX_TREE_ITER_TAGGED)
				offset = radix_tree_find_next_bit(node, tag,
						offset + 1);
			else
				while (++offset < RADIX_TREE_MAP_SIZE) {
					void *slot = rcu_dereference_raw(
							node->slots[offset]);
					if (is_sibling_entry(node, slot))
						continue;
					if (slot)
						break;
				}
			index &= ~node_maxindex(node);
			index += offset << node->shift;
			/* Overflow after ~0UL */
			if (!index)
				return NULL;
			if (offset == RADIX_TREE_MAP_SIZE)
				goto restart;
			child = rcu_dereference_raw(node->slots[offset]);
		}

		if (!child)
			goto restart;
		if (child == RADIX_TREE_RETRY)
			break;
	} while (radix_tree_is_internal_node(child));

	/* Update the iterator state */
	iter->index = (index & ~node_maxindex(node)) | (offset << node->shift);
	iter->next_index = (index | node_maxindex(node)) + 1;
	iter->node = node;
	__set_iter_shift(iter, node->shift);

	if (flags & RADIX_TREE_ITER_TAGGED)
		set_iter_tags(iter, node, offset, tag);

	return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);
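/*
 * Usage sketch (illustrative): callers normally reach this through the
 * iterator macros rather than calling it directly, e.g.:
 *
 *	struct radix_tree_iter iter;
 *	void __rcu **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
 *		void *item = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(item)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		... use item at iter.index ...
 *	}
 *	rcu_read_unlock();
 */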
/**
 * radix_tree_gang_lookup - perform multiple lookup on a radix tree
 * @root:	radix tree root
 * @results:	where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items:	place up to this many items at *results
 *
 * Performs an index-ascending scan of the tree for present items.  Places
 * them at *@results and returns the number of items which were placed at
 * *@results.
 *
 * The implementation is naive.
 *
 * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
 * rcu_read_lock.  In this case, rather than the returned results being
 * an atomic snapshot of the tree at a single point in time, the
 * semantics of an RCU-protected gang lookup are as though multiple
 * radix_tree_lookups have been issued under individual locks, and the
 * results stored in 'results'.
 */
unsigned int
radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
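/*
 * Usage sketch (illustrative): pull out one batch of present items
 * starting at index 0:
 *
 *	void *batch[16];
 *	unsigned int n;
 *
 *	rcu_read_lock();
 *	n = radix_tree_gang_lookup(&tree, batch, 0, ARRAY_SIZE(batch));
 *	rcu_read_unlock();
 */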
/**
 * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
 * @root:	radix tree root
 * @results:	where the results of the lookup are placed
 * @indices:	where their indices should be placed (but usually NULL)
 * @first_index: start the lookup from this key
 * @max_items:	place up to this many items at *results
 *
 * Performs an index-ascending scan of the tree for present items.  Places
 * their slots at *@results and returns the number of items which were
 * placed at *@results.
 *
 * The implementation is naive.
 *
 * Like radix_tree_gang_lookup as far as RCU and locking go.  Slots must
 * be dereferenced with radix_tree_deref_slot, and if using only RCU
 * protection, radix_tree_deref_slot may fail, requiring a retry.
 */
unsigned int
radix_tree_gang_lookup_slot(const struct radix_tree_root *root,
			void __rcu ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = slot;
		if (indices)
			indices[ret] = iter.index;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
/**
 * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
 *				based on a tag
 * @root:	radix tree root
 * @results:	where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items:	place up to this many items at *results
 * @tag:	the tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Performs an index-ascending scan of the tree for present items which
 * have the tag indexed by @tag set.  Places the items at *@results and
 * returns the number of items which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
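/*
 * Usage sketch (illustrative): collect up to a batch of entries carrying
 * the caller-defined MY_TAG:
 *
 *	rcu_read_lock();
 *	n = radix_tree_gang_lookup_tag(&tree, batch, 0,
 *				       ARRAY_SIZE(batch), MY_TAG);
 *	rcu_read_unlock();
 */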
/**
 * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
 *					radix tree based on a tag
 * @root:	radix tree root
 * @results:	where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items:	place up to this many items at *results
 * @tag:	the tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Performs an index-ascending scan of the tree for present items which
 * have the tag indexed by @tag set.  Places the slots at *@results and
 * returns the number of slots which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
		void __rcu ***results, unsigned long first_index,
		unsigned int max_items, unsigned int tag)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = slot;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
/**
 * __radix_tree_delete_node - try to free node after clearing a slot
 * @root:		radix tree root
 * @node:		node containing @index
 * @update_node:	callback for changing leaf nodes
 *
 * After clearing the slot at @index in @node from radix tree
 * rooted at @root, call this function to attempt freeing the
 * node and shrinking the tree.
 */
void __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node,
			      radix_tree_update_node_t update_node)
{
	delete_node(root, node, update_node);
}

static bool __radix_tree_delete(struct radix_tree_root *root,
			struct radix_tree_node *node, void __rcu **slot)
{
	void *old = rcu_dereference_raw(*slot);
	int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0;
	unsigned offset = get_slot_offset(node, slot);
	int tag;

	if (is_idr(root))
		node_tag_set(root, node, IDR_FREE, offset);
	else
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			node_tag_clear(root, node, tag, offset);

	replace_slot(slot, NULL, node, -1, exceptional);
	return node && delete_node(root, node, NULL);
}
/**
 * radix_tree_iter_delete - delete the entry at this iterator position
 * @root:	radix tree root
 * @iter:	iterator state
 * @slot:	pointer to slot
 *
 * Delete the entry at the position currently pointed to by the iterator.
 * This may result in the current node being freed; if it is, the iterator
 * is advanced so that it will not reference the freed memory.  This
 * function may be called without any locking if there are no other threads
 * which can access this tree.
 */
void radix_tree_iter_delete(struct radix_tree_root *root,
			struct radix_tree_iter *iter, void __rcu **slot)
{
	if (__radix_tree_delete(root, iter->node, slot))
		iter->index = iter->next_index;
}
EXPORT_SYMBOL(radix_tree_iter_delete);
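/*
 * Usage sketch (illustrative): empty a tree during a walk; deletion may
 * free the current node, but the iterator is repositioned safely:
 *
 *	radix_tree_for_each_slot(slot, &tree, &iter, 0)
 *		radix_tree_iter_delete(&tree, &iter, slot);
 */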
/**
 * radix_tree_delete_item - delete an item from a radix tree
 * @root:	radix tree root
 * @index:	index key
 * @item:	expected item
 *
 * Remove @item at @index from the radix tree rooted at @root.
 *
 * Return: the deleted entry, or %NULL if it was not present
 * or the entry at the given @index was not @item.
 */
void *radix_tree_delete_item(struct radix_tree_root *root,
			     unsigned long index, void *item)
{
	struct radix_tree_node *node = NULL;
	void __rcu **slot;
	void *entry;

	entry = __radix_tree_lookup(root, index, &node, &slot);
	if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
						get_slot_offset(node, slot))))
		return NULL;

	if (item && entry != item)
		return NULL;

	__radix_tree_delete(root, node, slot);

	return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);
/**
 * radix_tree_delete - delete an entry from a radix tree
 * @root:	radix tree root
 * @index:	index key
 *
 * Remove the entry at @index from the radix tree rooted at @root.
 *
 * Return: The deleted entry, or %NULL if it was not present.
 */
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
	return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);
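/*
 * Usage sketch (illustrative): remove and free one entry; the old entry
 * (or NULL) is returned, and kfree(NULL) is harmless:
 *
 *	spin_lock_irq(&tree_lock);
 *	item = radix_tree_delete(&tree, index);
 *	spin_unlock_irq(&tree_lock);
 *	kfree(item);
 */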
void radix_tree_clear_tags(struct radix_tree_root *root,
			   struct radix_tree_node *node,
			   void __rcu **slot)
{
	if (node) {
		unsigned int tag, offset = get_slot_offset(node, slot);
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			node_tag_clear(root, node, tag, offset);
	} else {
		root_tag_clear_all(root);
	}
}
/**
 * radix_tree_tagged - test whether any items in the tree are tagged
 * @root:	radix tree root
 * @tag:	tag to test
 */
int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
{
	return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preallocate memory to use for the next call to idr_alloc().  This function
 * returns with preemption disabled.  It will be enabled by idr_preload_end().
 */
void idr_preload(gfp_t gfp_mask)
{
	/*
	 * __radix_tree_preload() returns with preemption disabled on
	 * success.  If it fails, disable preemption here too, so that
	 * callers can always pair idr_preload() with idr_preload_end().
	 */
	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
		preempt_disable();
}
EXPORT_SYMBOL(idr_preload);
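/*
 * Usage sketch (illustrative): the standard preload pattern around
 * idr_alloc() when the IDR is protected by a caller-defined spinlock:
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&idr_lock);
 *	id = idr_alloc(&idr, ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&idr_lock);
 *	idr_preload_end();
 */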
/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp: memory allocation flags
 *
 * This function should be called before calling ida_get_new_above().  If it
 * is unable to allocate memory, it will return %0.  On success, it returns %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp)
{
	/*
	 * The IDA API has no preload_end() equivalent.  Instead,
	 * ida_get_new() can return -EAGAIN, prompting the caller
	 * to return to the ida_pre_get() step.
	 */
	if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
		preempt_enable();

	if (!this_cpu_read(ida_bitmap)) {
		struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);

		if (!bitmap)
			return 0;
		if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
			kfree(bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);
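/*
 * Usage sketch (illustrative): the retry loop the comment above describes,
 * with a caller-managed spinlock:
 *
 *	do {
 *		if (!ida_pre_get(&ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&ida_lock);
 *		err = ida_get_new_above(&ida, 0, &id);
 *		spin_unlock(&ida_lock);
 *	} while (err == -EAGAIN);
 */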
/*
 * Find a slot tagged IDR_FREE at or above iter->next_index, growing the
 * tree if necessary.  Returns the slot, or an ERR_PTR on failure.
 */
void __rcu **idr_get_free(struct radix_tree_root *root,
			  struct radix_tree_iter *iter, gfp_t gfp,
			  unsigned long max)
{
	struct radix_tree_node *node = NULL, *child;
	void __rcu **slot = (void __rcu **)&root->rnode;
	unsigned long maxindex, start = iter->next_index;
	unsigned int shift, offset = 0;

 grow:
	shift = radix_tree_load_root(root, &child, &maxindex);
	if (!radix_tree_tagged(root, IDR_FREE))
		start = max(start, maxindex + 1);
	if (start > max)
		return ERR_PTR(-ENOSPC);

	if (start > maxindex) {
		int error = radix_tree_extend(root, gfp, start, shift);
		if (error < 0)
			return ERR_PTR(error);
		shift = error;
		child = rcu_dereference_raw(root->rnode);
	}

	while (shift) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node. */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return ERR_PTR(-ENOMEM);
			all_tag_set(child, IDR_FREE);
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, start);
		if (!tag_get(node, IDR_FREE, offset)) {
			offset = radix_tree_find_next_bit(node, IDR_FREE,
							offset + 1);
			start = next_index(start, node, offset);
			if (start > max)
				return ERR_PTR(-ENOSPC);
			while (offset == RADIX_TREE_MAP_SIZE) {
				offset = node->offset + 1;
				node = node->parent;
				if (!node)
					goto grow;
				shift = node->shift;
			}
			child = rcu_dereference_raw(node->slots[offset]);
		}
		slot = &node->slots[offset];
	}

	iter->index = start;
	if (node)
		iter->next_index = 1 + min(max, (start | node_maxindex(node)));
	else
		iter->next_index = 1;
	iter->node = node;
	__set_iter_shift(iter, shift);
	set_iter_tags(iter, node, offset, IDR_FREE);

	return slot;
}
/**
 * idr_destroy - release all internal memory from an IDR
 * @idr: idr handle
 *
 * After this function is called, the IDR is empty, and may be reused or
 * the data structure containing it may be freed.
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free the memory used to keep track of those objects.
 */
void idr_destroy(struct idr *idr)
{
	struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode);

	if (radix_tree_is_internal_node(node))
		radix_tree_free_nodes(node);
	idr->idr_rt.rnode = NULL;
	root_tag_set(&idr->idr_rt, IDR_FREE);
}
EXPORT_SYMBOL(idr_destroy);
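/*
 * Usage sketch (illustrative): the clean-up sequence described above,
 * with a caller-defined free_item() callback passed to idr_for_each():
 *
 *	idr_for_each(&idr, free_item, NULL);
 *	idr_destroy(&idr);
 */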
static void
radix_tree_node_ctor(void *arg)
{
	struct radix_tree_node *node = arg;

	memset(node, 0, sizeof(*node));
	INIT_LIST_HEAD(&node->private_list);
}

static __init unsigned long __maxindex(unsigned int height)
{
	unsigned int width = height * RADIX_TREE_MAP_SHIFT;
	int shift = RADIX_TREE_INDEX_BITS - width;

	if (shift < 0)
		return ~0UL;
	if (shift >= BITS_PER_LONG)
		return 0UL;
	return ~0UL >> shift;
}
static __init void radix_tree_init_maxnodes(void)
{
	unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1];
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
		height_to_maxindex[i] = __maxindex(i);
	for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) {
		for (j = i; j > 0; j--)
			height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1;
	}
}
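/*
 * Worked example (illustrative), assuming RADIX_TREE_MAP_SHIFT == 6 on a
 * 64-bit machine: __maxindex(0) == 0 and __maxindex(1) == 63, so
 * height_to_maxnodes[2] == (0 + 1) + (63 + 1) == 65, i.e. one root node
 * plus 64 fully-populated children at the leaf level.
 */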
static int radix_tree_cpu_dead(unsigned int cpu)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;

	/* Free per-cpu pool of preloaded nodes */
	rtp = &per_cpu(radix_tree_preloads, cpu);
	while (rtp->nr) {
		node = rtp->nodes;
		rtp->nodes = node->parent;
		kmem_cache_free(radix_tree_node_cachep, node);
		rtp->nr--;
	}
	kfree(per_cpu(ida_bitmap, cpu));
	per_cpu(ida_bitmap, cpu) = NULL;
	return 0;
}
void __init radix_tree_init(void)
{
	int ret;

	BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
			sizeof(struct radix_tree_node), 0,
			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
			radix_tree_node_ctor);
	radix_tree_init_maxnodes();
	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
					NULL, radix_tree_cpu_dead);
	WARN_ON(ret < 0);
}