/* lib/assoc_array.c */
  1. /* Generic associative array implementation.
  2. *
  3. * See Documentation/core-api/assoc_array.rst for information.
  4. *
  5. * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
  6. * Written by David Howells (dhowells@redhat.com)
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public Licence
  10. * as published by the Free Software Foundation; either version
  11. * 2 of the Licence, or (at your option) any later version.
  12. */
  13. //#define DEBUG
  14. #include <linux/rcupdate.h>
  15. #include <linux/slab.h>
  16. #include <linux/err.h>
  17. #include <linux/assoc_array_priv.h>
/*
 * Iterate over an associative array.  The caller must hold the RCU read lock
 * or better.
 *
 * Walk the subtree rooted at @root, invoking @iterator on every leaf found.
 * The walk stops early and returns the iterator's value if it returns
 * non-zero; otherwise 0 is returned once the walk ascends back to @stop
 * (NULL for a whole-tree walk).
 *
 * Under concurrent modification a leaf may be presented more than once, but
 * no leaf present for the whole walk is missed (see the two-pass comment
 * below).
 */
static int assoc_array_subtree_iterate(const struct assoc_array_ptr *root,
				       const struct assoc_array_ptr *stop,
				       int (*iterator)(const void *leaf,
						       void *iterator_data),
				       void *iterator_data)
{
	const struct assoc_array_shortcut *shortcut;
	const struct assoc_array_node *node;
	const struct assoc_array_ptr *cursor, *ptr, *parent;
	unsigned long has_meta;
	int slot, ret;

	cursor = root;

begin_node:
	if (assoc_array_ptr_is_shortcut(cursor)) {
		/* Descend through a shortcut */
		shortcut = assoc_array_ptr_to_shortcut(cursor);
		cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
	}

	node = assoc_array_ptr_to_node(cursor);
	slot = 0;

	/* We perform two passes of each node.
	 *
	 * The first pass does all the leaves in this node.  This means we
	 * don't miss any leaves if the node is split up by insertion whilst
	 * we're iterating over the branches rooted here (we may, however, see
	 * some leaves twice).
	 */
	has_meta = 0;
	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
		ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
		/* Accumulate pointer bits so we can tell afterwards whether
		 * any slot held a metadata (node/shortcut) pointer.
		 */
		has_meta |= (unsigned long)ptr;
		if (ptr && assoc_array_ptr_is_leaf(ptr)) {
			/* We need a barrier between the read of the pointer,
			 * which is supplied by the above READ_ONCE().
			 */

			/* Invoke the callback */
			ret = iterator(assoc_array_ptr_to_leaf(ptr),
				       iterator_data);
			if (ret)
				return ret;
		}
	}

	/* The second pass attends to all the metadata pointers.  If we follow
	 * one of these we may find that we don't come back here, but rather go
	 * back to a replacement node with the leaves in a different layout.
	 *
	 * We are guaranteed to make progress, however, as the slot number for
	 * a particular portion of the key space cannot change - and we
	 * continue at the back pointer + 1.
	 */
	if (!(has_meta & ASSOC_ARRAY_PTR_META_TYPE))
		goto finished_node;
	slot = 0;

continue_node:
	node = assoc_array_ptr_to_node(cursor);
	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
		ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
		if (assoc_array_ptr_is_meta(ptr)) {
			cursor = ptr;
			goto begin_node;
		}
	}

finished_node:
	/* Move up to the parent (may need to skip back over a shortcut) */
	parent = READ_ONCE(node->back_pointer); /* Address dependency. */
	slot = node->parent_slot;
	if (parent == stop)
		return 0;

	if (assoc_array_ptr_is_shortcut(parent)) {
		shortcut = assoc_array_ptr_to_shortcut(parent);
		cursor = parent;
		parent = READ_ONCE(shortcut->back_pointer); /* Address dependency. */
		slot = shortcut->parent_slot;
		if (parent == stop)
			return 0;
	}

	/* Ascend to next slot in parent node */
	cursor = parent;
	slot++;
	goto continue_node;
}
  103. /**
  104. * assoc_array_iterate - Pass all objects in the array to a callback
  105. * @array: The array to iterate over.
  106. * @iterator: The callback function.
  107. * @iterator_data: Private data for the callback function.
  108. *
  109. * Iterate over all the objects in an associative array. Each one will be
  110. * presented to the iterator function.
  111. *
  112. * If the array is being modified concurrently with the iteration then it is
  113. * possible that some objects in the array will be passed to the iterator
  114. * callback more than once - though every object should be passed at least
  115. * once. If this is undesirable then the caller must lock against modification
  116. * for the duration of this function.
  117. *
  118. * The function will return 0 if no objects were in the array or else it will
  119. * return the result of the last iterator function called. Iteration stops
  120. * immediately if any call to the iteration function results in a non-zero
  121. * return.
  122. *
  123. * The caller should hold the RCU read lock or better if concurrent
  124. * modification is possible.
  125. */
  126. int assoc_array_iterate(const struct assoc_array *array,
  127. int (*iterator)(const void *object,
  128. void *iterator_data),
  129. void *iterator_data)
  130. {
  131. struct assoc_array_ptr *root = READ_ONCE(array->root); /* Address dependency. */
  132. if (!root)
  133. return 0;
  134. return assoc_array_subtree_iterate(root, NULL, iterator, iterator_data);
  135. }
/* Possible outcomes of assoc_array_walk(). */
enum assoc_array_walk_status {
	assoc_array_walk_tree_empty,		/* The array has no root pointer */
	assoc_array_walk_found_terminal_node,	/* Reached the node the key selects */
	assoc_array_walk_found_wrong_shortcut,	/* A shortcut's key diverges from ours */
};
/* Where assoc_array_walk() stopped.  Which member is meaningful depends on
 * the returned assoc_array_walk_status.
 */
struct assoc_array_walk_result {
	struct {
		struct assoc_array_node *node;	/* Node in which leaf might be found */
		int level;			/* Key level reached at this node */
		int slot;			/* Slot selected by the index key */
	} terminal_node;
	struct {
		struct assoc_array_shortcut *shortcut;	/* The mismatching shortcut */
		int level;		/* Level at which the shortcut was entered */
		int sc_level;		/* Level at which the mismatch was detected */
		unsigned long sc_segments;	/* Shortcut's key chunk at sc_level */
		unsigned long dissimilarity;	/* XOR of our chunk and the shortcut's */
	} wrong_shortcut;
};
/*
 * Navigate through the internal tree looking for the closest node to the key.
 *
 * Returns one of:
 *  - assoc_array_walk_tree_empty: the array has no root;
 *  - assoc_array_walk_found_terminal_node: result->terminal_node identifies
 *    the node, level and slot where a leaf with @index_key would live;
 *  - assoc_array_walk_found_wrong_shortcut: result->wrong_shortcut identifies
 *    a shortcut whose stored key diverges from @index_key and where.
 *
 * The caller must hold the RCU read lock or better.
 */
static enum assoc_array_walk_status
assoc_array_walk(const struct assoc_array *array,
		 const struct assoc_array_ops *ops,
		 const void *index_key,
		 struct assoc_array_walk_result *result)
{
	struct assoc_array_shortcut *shortcut;
	struct assoc_array_node *node;
	struct assoc_array_ptr *cursor, *ptr;
	unsigned long sc_segments, dissimilarity;
	unsigned long segments;
	int level, sc_level, next_sc_level;
	int slot;

	pr_devel("-->%s()\n", __func__);

	cursor = READ_ONCE(array->root);  /* Address dependency. */
	if (!cursor)
		return assoc_array_walk_tree_empty;

	level = 0;

	/* Use segments from the key for the new leaf to navigate through the
	 * internal tree, skipping through nodes and shortcuts that are on
	 * route to the destination.  Eventually we'll come to a slot that is
	 * either empty or contains a leaf at which point we've found a node in
	 * which the leaf we're looking for might be found or into which it
	 * should be inserted.
	 */
jumped:
	segments = ops->get_key_chunk(index_key, level);
	pr_devel("segments[%d]: %lx\n", level, segments);

	if (assoc_array_ptr_is_shortcut(cursor))
		goto follow_shortcut;

consider_node:
	node = assoc_array_ptr_to_node(cursor);
	/* Pick the slot indexed by this level's bits of the key chunk. */
	slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
	slot &= ASSOC_ARRAY_FAN_MASK;
	ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */

	pr_devel("consider slot %x [ix=%d type=%lu]\n",
		 slot, level, (unsigned long)ptr & 3);

	if (!assoc_array_ptr_is_meta(ptr)) {
		/* The node doesn't have a node/shortcut pointer in the slot
		 * corresponding to the index key that we have to follow.
		 */
		result->terminal_node.node = node;
		result->terminal_node.level = level;
		result->terminal_node.slot = slot;
		pr_devel("<--%s() = terminal_node\n", __func__);
		return assoc_array_walk_found_terminal_node;
	}

	if (assoc_array_ptr_is_node(ptr)) {
		/* There is a pointer to a node in the slot corresponding to
		 * this index key segment, so we need to follow it.
		 */
		cursor = ptr;
		level += ASSOC_ARRAY_LEVEL_STEP;
		/* Fetch a fresh key chunk only on chunk boundaries. */
		if ((level & ASSOC_ARRAY_KEY_CHUNK_MASK) != 0)
			goto consider_node;
		goto jumped;
	}

	/* There is a shortcut in the slot corresponding to the index key
	 * segment.  We follow the shortcut if its partial index key matches
	 * this leaf's.  Otherwise we need to split the shortcut.
	 */
	cursor = ptr;

follow_shortcut:
	shortcut = assoc_array_ptr_to_shortcut(cursor);
	pr_devel("shortcut to %d\n", shortcut->skip_to_level);
	sc_level = level + ASSOC_ARRAY_LEVEL_STEP;
	BUG_ON(sc_level > shortcut->skip_to_level);

	do {
		/* Check the leaf against the shortcut's index key a word at a
		 * time, trimming the final word (the shortcut stores the index
		 * key completely from the root to the shortcut's target).
		 */
		if ((sc_level & ASSOC_ARRAY_KEY_CHUNK_MASK) == 0)
			segments = ops->get_key_chunk(index_key, sc_level);

		sc_segments = shortcut->index_key[sc_level >> ASSOC_ARRAY_KEY_CHUNK_SHIFT];
		dissimilarity = segments ^ sc_segments;

		if (round_up(sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE) > shortcut->skip_to_level) {
			/* Trim segments that are beyond the shortcut */
			int shift = shortcut->skip_to_level & ASSOC_ARRAY_KEY_CHUNK_MASK;
			dissimilarity &= ~(ULONG_MAX << shift);
			next_sc_level = shortcut->skip_to_level;
		} else {
			next_sc_level = sc_level + ASSOC_ARRAY_KEY_CHUNK_SIZE;
			next_sc_level = round_down(next_sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
		}

		if (dissimilarity != 0) {
			/* This shortcut points elsewhere */
			result->wrong_shortcut.shortcut = shortcut;
			result->wrong_shortcut.level = level;
			result->wrong_shortcut.sc_level = sc_level;
			result->wrong_shortcut.sc_segments = sc_segments;
			result->wrong_shortcut.dissimilarity = dissimilarity;
			return assoc_array_walk_found_wrong_shortcut;
		}

		sc_level = next_sc_level;
	} while (sc_level < shortcut->skip_to_level);

	/* The shortcut matches the leaf's index to this point. */
	cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
	if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
		/* We crossed into a new key chunk; reload it at "jumped". */
		level = sc_level;
		goto jumped;
	} else {
		level = sc_level;
		goto consider_node;
	}
}
/**
 * assoc_array_find - Find an object by index key
 * @array: The associative array to search.
 * @ops: The operations to use.
 * @index_key: The key to the object.
 *
 * Find an object in an associative array by walking through the internal tree
 * to the node that should contain the object and then searching the leaves
 * there.  NULL is returned if the requested object was not found in the array.
 *
 * The caller must hold the RCU read lock or better.
 */
void *assoc_array_find(const struct assoc_array *array,
		       const struct assoc_array_ops *ops,
		       const void *index_key)
{
	struct assoc_array_walk_result result;
	const struct assoc_array_node *node;
	const struct assoc_array_ptr *ptr;
	const void *leaf;
	int slot;

	/* An empty tree or a wrong-shortcut outcome both mean the key is not
	 * present.
	 */
	if (assoc_array_walk(array, ops, index_key, &result) !=
	    assoc_array_walk_found_terminal_node)
		return NULL;

	node = result.terminal_node.node;

	/* If the target key is available to us, it's has to be pointed to by
	 * the terminal node.
	 */
	for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
		ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
		if (ptr && assoc_array_ptr_is_leaf(ptr)) {
			/* We need a barrier between the read of the pointer
			 * and dereferencing the pointer - but only if we are
			 * actually going to dereference it.
			 */
			leaf = assoc_array_ptr_to_leaf(ptr);
			if (ops->compare_object(leaf, index_key))
				return (void *)leaf;
		}
	}

	return NULL;
}
/*
 * Destructively iterate over an associative array.  The caller must prevent
 * other simultaneous accesses.
 *
 * Frees every node and shortcut in the subtree rooted at @root.  If @ops is
 * non-NULL, each leaf object is passed to ops->free_object() as well.  The
 * BUG_ON()s check the tree's internal back-pointer/parent-slot invariants as
 * it is torn down.
 */
static void assoc_array_destroy_subtree(struct assoc_array_ptr *root,
					const struct assoc_array_ops *ops)
{
	struct assoc_array_shortcut *shortcut;
	struct assoc_array_node *node;
	struct assoc_array_ptr *cursor, *parent = NULL;
	int slot = -1;	/* -1 means "invariant checks on slot are skipped" */

	pr_devel("-->%s()\n", __func__);

	cursor = root;
	if (!cursor) {
		pr_devel("empty\n");
		return;
	}

move_to_meta:
	if (assoc_array_ptr_is_shortcut(cursor)) {
		/* Descend through a shortcut */
		pr_devel("[%d] shortcut\n", slot);
		BUG_ON(!assoc_array_ptr_is_shortcut(cursor));
		shortcut = assoc_array_ptr_to_shortcut(cursor);
		BUG_ON(shortcut->back_pointer != parent);
		BUG_ON(slot != -1 && shortcut->parent_slot != slot);
		parent = cursor;
		cursor = shortcut->next_node;
		slot = -1;
		BUG_ON(!assoc_array_ptr_is_node(cursor));
	}

	pr_devel("[%d] node\n", slot);
	node = assoc_array_ptr_to_node(cursor);
	BUG_ON(node->back_pointer != parent);
	BUG_ON(slot != -1 && node->parent_slot != slot);
	slot = 0;

continue_node:
	pr_devel("Node %p [back=%p]\n", node, node->back_pointer);
	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
		struct assoc_array_ptr *ptr = node->slots[slot];

		if (!ptr)
			continue;
		if (assoc_array_ptr_is_meta(ptr)) {
			/* Descend first; this node is freed on the way back up. */
			parent = cursor;
			cursor = ptr;
			goto move_to_meta;
		}

		if (ops) {
			pr_devel("[%d] free leaf\n", slot);
			ops->free_object(assoc_array_ptr_to_leaf(ptr));
		}
	}

	parent = node->back_pointer;
	slot = node->parent_slot;
	pr_devel("free node\n");
	kfree(node);
	if (!parent)
		return; /* Done */

	/* Move back up to the parent (may need to free a shortcut on
	 * the way up) */
	if (assoc_array_ptr_is_shortcut(parent)) {
		shortcut = assoc_array_ptr_to_shortcut(parent);
		BUG_ON(shortcut->next_node != cursor);
		cursor = parent;
		parent = shortcut->back_pointer;
		slot = shortcut->parent_slot;
		pr_devel("free shortcut\n");
		kfree(shortcut);
		if (!parent)
			return;

		BUG_ON(!assoc_array_ptr_is_node(parent));
	}

	/* Ascend to next slot in parent node */
	pr_devel("ascend to %p[%d]\n", parent, slot);
	cursor = parent;
	node = assoc_array_ptr_to_node(cursor);
	slot++;
	goto continue_node;
}
  384. /**
  385. * assoc_array_destroy - Destroy an associative array
  386. * @array: The array to destroy.
  387. * @ops: The operations to use.
  388. *
  389. * Discard all metadata and free all objects in an associative array. The
  390. * array will be empty and ready to use again upon completion. This function
  391. * cannot fail.
  392. *
  393. * The caller must prevent all other accesses whilst this takes place as no
  394. * attempt is made to adjust pointers gracefully to permit RCU readlock-holding
  395. * accesses to continue. On the other hand, no memory allocation is required.
  396. */
  397. void assoc_array_destroy(struct assoc_array *array,
  398. const struct assoc_array_ops *ops)
  399. {
  400. assoc_array_destroy_subtree(array->root, ops);
  401. array->root = NULL;
  402. }
/*
 * Handle insertion into an empty tree.
 *
 * Allocate a single node to become the new root and record in @edit:
 *  - where the new leaf pointer will be written (slot 0 of the new node),
 *  - the node whose leaf count must be adjusted, and
 *  - the root-pointer update to apply.
 *
 * Returns false on allocation failure, true otherwise.  Nothing in the array
 * itself is modified here; changes take effect when the edit is applied.
 */
static bool assoc_array_insert_in_empty_tree(struct assoc_array_edit *edit)
{
	struct assoc_array_node *new_n0;

	pr_devel("-->%s()\n", __func__);

	new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
	if (!new_n0)
		return false;

	edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
	edit->leaf_p = &new_n0->slots[0];
	edit->adjust_count_on = new_n0;
	edit->set[0].ptr = &edit->array->root;
	edit->set[0].to = assoc_array_node_to_ptr(new_n0);

	pr_devel("<--%s() = ok [no root]\n", __func__);
	return true;
}
  421. /*
  422. * Handle insertion into a terminal node.
  423. */
  424. static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
  425. const struct assoc_array_ops *ops,
  426. const void *index_key,
  427. struct assoc_array_walk_result *result)
  428. {
  429. struct assoc_array_shortcut *shortcut, *new_s0;
  430. struct assoc_array_node *node, *new_n0, *new_n1, *side;
  431. struct assoc_array_ptr *ptr;
  432. unsigned long dissimilarity, base_seg, blank;
  433. size_t keylen;
  434. bool have_meta;
  435. int level, diff;
  436. int slot, next_slot, free_slot, i, j;
  437. node = result->terminal_node.node;
  438. level = result->terminal_node.level;
  439. edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot;
  440. pr_devel("-->%s()\n", __func__);
  441. /* We arrived at a node which doesn't have an onward node or shortcut
  442. * pointer that we have to follow. This means that (a) the leaf we
  443. * want must go here (either by insertion or replacement) or (b) we
  444. * need to split this node and insert in one of the fragments.
  445. */
  446. free_slot = -1;
  447. /* Firstly, we have to check the leaves in this node to see if there's
  448. * a matching one we should replace in place.
  449. */
  450. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  451. ptr = node->slots[i];
  452. if (!ptr) {
  453. free_slot = i;
  454. continue;
  455. }
  456. if (assoc_array_ptr_is_leaf(ptr) &&
  457. ops->compare_object(assoc_array_ptr_to_leaf(ptr),
  458. index_key)) {
  459. pr_devel("replace in slot %d\n", i);
  460. edit->leaf_p = &node->slots[i];
  461. edit->dead_leaf = node->slots[i];
  462. pr_devel("<--%s() = ok [replace]\n", __func__);
  463. return true;
  464. }
  465. }
  466. /* If there is a free slot in this node then we can just insert the
  467. * leaf here.
  468. */
  469. if (free_slot >= 0) {
  470. pr_devel("insert in free slot %d\n", free_slot);
  471. edit->leaf_p = &node->slots[free_slot];
  472. edit->adjust_count_on = node;
  473. pr_devel("<--%s() = ok [insert]\n", __func__);
  474. return true;
  475. }
  476. /* The node has no spare slots - so we're either going to have to split
  477. * it or insert another node before it.
  478. *
  479. * Whatever, we're going to need at least two new nodes - so allocate
  480. * those now. We may also need a new shortcut, but we deal with that
  481. * when we need it.
  482. */
  483. new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  484. if (!new_n0)
  485. return false;
  486. edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
  487. new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  488. if (!new_n1)
  489. return false;
  490. edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
  491. /* We need to find out how similar the leaves are. */
  492. pr_devel("no spare slots\n");
  493. have_meta = false;
  494. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  495. ptr = node->slots[i];
  496. if (assoc_array_ptr_is_meta(ptr)) {
  497. edit->segment_cache[i] = 0xff;
  498. have_meta = true;
  499. continue;
  500. }
  501. base_seg = ops->get_object_key_chunk(
  502. assoc_array_ptr_to_leaf(ptr), level);
  503. base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
  504. edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
  505. }
  506. if (have_meta) {
  507. pr_devel("have meta\n");
  508. goto split_node;
  509. }
  510. /* The node contains only leaves */
  511. dissimilarity = 0;
  512. base_seg = edit->segment_cache[0];
  513. for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
  514. dissimilarity |= edit->segment_cache[i] ^ base_seg;
  515. pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity);
  516. if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) {
  517. /* The old leaves all cluster in the same slot. We will need
  518. * to insert a shortcut if the new node wants to cluster with them.
  519. */
  520. if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
  521. goto all_leaves_cluster_together;
  522. /* Otherwise all the old leaves cluster in the same slot, but
  523. * the new leaf wants to go into a different slot - so we
  524. * create a new node (n0) to hold the new leaf and a pointer to
  525. * a new node (n1) holding all the old leaves.
  526. *
  527. * This can be done by falling through to the node splitting
  528. * path.
  529. */
  530. pr_devel("present leaves cluster but not new leaf\n");
  531. }
  532. split_node:
  533. pr_devel("split node\n");
  534. /* We need to split the current node. The node must contain anything
  535. * from a single leaf (in the one leaf case, this leaf will cluster
  536. * with the new leaf) and the rest meta-pointers, to all leaves, some
  537. * of which may cluster.
  538. *
  539. * It won't contain the case in which all the current leaves plus the
  540. * new leaves want to cluster in the same slot.
  541. *
  542. * We need to expel at least two leaves out of a set consisting of the
  543. * leaves in the node and the new leaf. The current meta pointers can
  544. * just be copied as they shouldn't cluster with any of the leaves.
  545. *
  546. * We need a new node (n0) to replace the current one and a new node to
  547. * take the expelled nodes (n1).
  548. */
  549. edit->set[0].to = assoc_array_node_to_ptr(new_n0);
  550. new_n0->back_pointer = node->back_pointer;
  551. new_n0->parent_slot = node->parent_slot;
  552. new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
  553. new_n1->parent_slot = -1; /* Need to calculate this */
  554. do_split_node:
  555. pr_devel("do_split_node\n");
  556. new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
  557. new_n1->nr_leaves_on_branch = 0;
  558. /* Begin by finding two matching leaves. There have to be at least two
  559. * that match - even if there are meta pointers - because any leaf that
  560. * would match a slot with a meta pointer in it must be somewhere
  561. * behind that meta pointer and cannot be here. Further, given N
  562. * remaining leaf slots, we now have N+1 leaves to go in them.
  563. */
  564. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  565. slot = edit->segment_cache[i];
  566. if (slot != 0xff)
  567. for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++)
  568. if (edit->segment_cache[j] == slot)
  569. goto found_slot_for_multiple_occupancy;
  570. }
  571. found_slot_for_multiple_occupancy:
  572. pr_devel("same slot: %x %x [%02x]\n", i, j, slot);
  573. BUG_ON(i >= ASSOC_ARRAY_FAN_OUT);
  574. BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1);
  575. BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT);
  576. new_n1->parent_slot = slot;
  577. /* Metadata pointers cannot change slot */
  578. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
  579. if (assoc_array_ptr_is_meta(node->slots[i]))
  580. new_n0->slots[i] = node->slots[i];
  581. else
  582. new_n0->slots[i] = NULL;
  583. BUG_ON(new_n0->slots[slot] != NULL);
  584. new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1);
  585. /* Filter the leaf pointers between the new nodes */
  586. free_slot = -1;
  587. next_slot = 0;
  588. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  589. if (assoc_array_ptr_is_meta(node->slots[i]))
  590. continue;
  591. if (edit->segment_cache[i] == slot) {
  592. new_n1->slots[next_slot++] = node->slots[i];
  593. new_n1->nr_leaves_on_branch++;
  594. } else {
  595. do {
  596. free_slot++;
  597. } while (new_n0->slots[free_slot] != NULL);
  598. new_n0->slots[free_slot] = node->slots[i];
  599. }
  600. }
  601. pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot);
  602. if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) {
  603. do {
  604. free_slot++;
  605. } while (new_n0->slots[free_slot] != NULL);
  606. edit->leaf_p = &new_n0->slots[free_slot];
  607. edit->adjust_count_on = new_n0;
  608. } else {
  609. edit->leaf_p = &new_n1->slots[next_slot++];
  610. edit->adjust_count_on = new_n1;
  611. }
  612. BUG_ON(next_slot <= 1);
  613. edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0);
  614. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  615. if (edit->segment_cache[i] == 0xff) {
  616. ptr = node->slots[i];
  617. BUG_ON(assoc_array_ptr_is_leaf(ptr));
  618. if (assoc_array_ptr_is_node(ptr)) {
  619. side = assoc_array_ptr_to_node(ptr);
  620. edit->set_backpointers[i] = &side->back_pointer;
  621. } else {
  622. shortcut = assoc_array_ptr_to_shortcut(ptr);
  623. edit->set_backpointers[i] = &shortcut->back_pointer;
  624. }
  625. }
  626. }
  627. ptr = node->back_pointer;
  628. if (!ptr)
  629. edit->set[0].ptr = &edit->array->root;
  630. else if (assoc_array_ptr_is_node(ptr))
  631. edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
  632. else
  633. edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node;
  634. edit->excised_meta[0] = assoc_array_node_to_ptr(node);
  635. pr_devel("<--%s() = ok [split node]\n", __func__);
  636. return true;
  637. all_leaves_cluster_together:
  638. /* All the leaves, new and old, want to cluster together in this node
  639. * in the same slot, so we have to replace this node with a shortcut to
  640. * skip over the identical parts of the key and then place a pair of
  641. * nodes, one inside the other, at the end of the shortcut and
  642. * distribute the keys between them.
  643. *
  644. * Firstly we need to work out where the leaves start diverging as a
  645. * bit position into their keys so that we know how big the shortcut
  646. * needs to be.
  647. *
  648. * We only need to make a single pass of N of the N+1 leaves because if
  649. * any keys differ between themselves at bit X then at least one of
  650. * them must also differ with the base key at bit X or before.
  651. */
  652. pr_devel("all leaves cluster together\n");
  653. diff = INT_MAX;
  654. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  655. int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
  656. index_key);
  657. if (x < diff) {
  658. BUG_ON(x < 0);
  659. diff = x;
  660. }
  661. }
  662. BUG_ON(diff == INT_MAX);
  663. BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP);
  664. keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
  665. keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
  666. new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
  667. keylen * sizeof(unsigned long), GFP_KERNEL);
  668. if (!new_s0)
  669. return false;
  670. edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
  671. edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
  672. new_s0->back_pointer = node->back_pointer;
  673. new_s0->parent_slot = node->parent_slot;
  674. new_s0->next_node = assoc_array_node_to_ptr(new_n0);
  675. new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
  676. new_n0->parent_slot = 0;
  677. new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
  678. new_n1->parent_slot = -1; /* Need to calculate this */
  679. new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK;
  680. pr_devel("skip_to_level = %d [diff %d]\n", level, diff);
  681. BUG_ON(level <= 0);
  682. for (i = 0; i < keylen; i++)
  683. new_s0->index_key[i] =
  684. ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
  685. blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
  686. pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
  687. new_s0->index_key[keylen - 1] &= ~blank;
  688. /* This now reduces to a node splitting exercise for which we'll need
  689. * to regenerate the disparity table.
  690. */
  691. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  692. ptr = node->slots[i];
  693. base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr),
  694. level);
  695. base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
  696. edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
  697. }
  698. base_seg = ops->get_key_chunk(index_key, level);
  699. base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
  700. edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK;
  701. goto do_split_node;
  702. }
  703. /*
  704. * Handle insertion into the middle of a shortcut.
  705. */
  706. static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
  707. const struct assoc_array_ops *ops,
  708. struct assoc_array_walk_result *result)
  709. {
  710. struct assoc_array_shortcut *shortcut, *new_s0, *new_s1;
  711. struct assoc_array_node *node, *new_n0, *side;
  712. unsigned long sc_segments, dissimilarity, blank;
  713. size_t keylen;
  714. int level, sc_level, diff;
  715. int sc_slot;
  716. shortcut = result->wrong_shortcut.shortcut;
  717. level = result->wrong_shortcut.level;
  718. sc_level = result->wrong_shortcut.sc_level;
  719. sc_segments = result->wrong_shortcut.sc_segments;
  720. dissimilarity = result->wrong_shortcut.dissimilarity;
  721. pr_devel("-->%s(ix=%d dis=%lx scix=%d)\n",
  722. __func__, level, dissimilarity, sc_level);
  723. /* We need to split a shortcut and insert a node between the two
  724. * pieces. Zero-length pieces will be dispensed with entirely.
  725. *
  726. * First of all, we need to find out in which level the first
  727. * difference was.
  728. */
  729. diff = __ffs(dissimilarity);
  730. diff &= ~ASSOC_ARRAY_LEVEL_STEP_MASK;
  731. diff += sc_level & ~ASSOC_ARRAY_KEY_CHUNK_MASK;
  732. pr_devel("diff=%d\n", diff);
  733. if (!shortcut->back_pointer) {
  734. edit->set[0].ptr = &edit->array->root;
  735. } else if (assoc_array_ptr_is_node(shortcut->back_pointer)) {
  736. node = assoc_array_ptr_to_node(shortcut->back_pointer);
  737. edit->set[0].ptr = &node->slots[shortcut->parent_slot];
  738. } else {
  739. BUG();
  740. }
  741. edit->excised_meta[0] = assoc_array_shortcut_to_ptr(shortcut);
  742. /* Create a new node now since we're going to need it anyway */
  743. new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  744. if (!new_n0)
  745. return false;
  746. edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
  747. edit->adjust_count_on = new_n0;
  748. /* Insert a new shortcut before the new node if this segment isn't of
  749. * zero length - otherwise we just connect the new node directly to the
  750. * parent.
  751. */
  752. level += ASSOC_ARRAY_LEVEL_STEP;
  753. if (diff > level) {
  754. pr_devel("pre-shortcut %d...%d\n", level, diff);
  755. keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
  756. keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
  757. new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
  758. keylen * sizeof(unsigned long), GFP_KERNEL);
  759. if (!new_s0)
  760. return false;
  761. edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0);
  762. edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
  763. new_s0->back_pointer = shortcut->back_pointer;
  764. new_s0->parent_slot = shortcut->parent_slot;
  765. new_s0->next_node = assoc_array_node_to_ptr(new_n0);
  766. new_s0->skip_to_level = diff;
  767. new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
  768. new_n0->parent_slot = 0;
  769. memcpy(new_s0->index_key, shortcut->index_key,
  770. keylen * sizeof(unsigned long));
  771. blank = ULONG_MAX << (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
  772. pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, diff, blank);
  773. new_s0->index_key[keylen - 1] &= ~blank;
  774. } else {
  775. pr_devel("no pre-shortcut\n");
  776. edit->set[0].to = assoc_array_node_to_ptr(new_n0);
  777. new_n0->back_pointer = shortcut->back_pointer;
  778. new_n0->parent_slot = shortcut->parent_slot;
  779. }
  780. side = assoc_array_ptr_to_node(shortcut->next_node);
  781. new_n0->nr_leaves_on_branch = side->nr_leaves_on_branch;
  782. /* We need to know which slot in the new node is going to take a
  783. * metadata pointer.
  784. */
  785. sc_slot = sc_segments >> (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
  786. sc_slot &= ASSOC_ARRAY_FAN_MASK;
  787. pr_devel("new slot %lx >> %d -> %d\n",
  788. sc_segments, diff & ASSOC_ARRAY_KEY_CHUNK_MASK, sc_slot);
  789. /* Determine whether we need to follow the new node with a replacement
  790. * for the current shortcut. We could in theory reuse the current
  791. * shortcut if its parent slot number doesn't change - but that's a
  792. * 1-in-16 chance so not worth expending the code upon.
  793. */
  794. level = diff + ASSOC_ARRAY_LEVEL_STEP;
  795. if (level < shortcut->skip_to_level) {
  796. pr_devel("post-shortcut %d...%d\n", level, shortcut->skip_to_level);
  797. keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
  798. keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
  799. new_s1 = kzalloc(sizeof(struct assoc_array_shortcut) +
  800. keylen * sizeof(unsigned long), GFP_KERNEL);
  801. if (!new_s1)
  802. return false;
  803. edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1);
  804. new_s1->back_pointer = assoc_array_node_to_ptr(new_n0);
  805. new_s1->parent_slot = sc_slot;
  806. new_s1->next_node = shortcut->next_node;
  807. new_s1->skip_to_level = shortcut->skip_to_level;
  808. new_n0->slots[sc_slot] = assoc_array_shortcut_to_ptr(new_s1);
  809. memcpy(new_s1->index_key, shortcut->index_key,
  810. keylen * sizeof(unsigned long));
  811. edit->set[1].ptr = &side->back_pointer;
  812. edit->set[1].to = assoc_array_shortcut_to_ptr(new_s1);
  813. } else {
  814. pr_devel("no post-shortcut\n");
  815. /* We don't have to replace the pointed-to node as long as we
  816. * use memory barriers to make sure the parent slot number is
  817. * changed before the back pointer (the parent slot number is
  818. * irrelevant to the old parent shortcut).
  819. */
  820. new_n0->slots[sc_slot] = shortcut->next_node;
  821. edit->set_parent_slot[0].p = &side->parent_slot;
  822. edit->set_parent_slot[0].to = sc_slot;
  823. edit->set[1].ptr = &side->back_pointer;
  824. edit->set[1].to = assoc_array_node_to_ptr(new_n0);
  825. }
  826. /* Install the new leaf in a spare slot in the new node. */
  827. if (sc_slot == 0)
  828. edit->leaf_p = &new_n0->slots[1];
  829. else
  830. edit->leaf_p = &new_n0->slots[0];
  831. pr_devel("<--%s() = ok [split shortcut]\n", __func__);
  832. return edit;
  833. }
  834. /**
  835. * assoc_array_insert - Script insertion of an object into an associative array
  836. * @array: The array to insert into.
  837. * @ops: The operations to use.
  838. * @index_key: The key to insert at.
  839. * @object: The object to insert.
  840. *
  841. * Precalculate and preallocate a script for the insertion or replacement of an
  842. * object in an associative array. This results in an edit script that can
  843. * either be applied or cancelled.
  844. *
  845. * The function returns a pointer to an edit script or -ENOMEM.
  846. *
  847. * The caller should lock against other modifications and must continue to hold
  848. * the lock until assoc_array_apply_edit() has been called.
  849. *
  850. * Accesses to the tree may take place concurrently with this function,
  851. * provided they hold the RCU read lock.
  852. */
  853. struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
  854. const struct assoc_array_ops *ops,
  855. const void *index_key,
  856. void *object)
  857. {
  858. struct assoc_array_walk_result result;
  859. struct assoc_array_edit *edit;
  860. pr_devel("-->%s()\n", __func__);
  861. /* The leaf pointer we're given must not have the bottom bit set as we
  862. * use those for type-marking the pointer. NULL pointers are also not
  863. * allowed as they indicate an empty slot but we have to allow them
  864. * here as they can be updated later.
  865. */
  866. BUG_ON(assoc_array_ptr_is_meta(object));
  867. edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
  868. if (!edit)
  869. return ERR_PTR(-ENOMEM);
  870. edit->array = array;
  871. edit->ops = ops;
  872. edit->leaf = assoc_array_leaf_to_ptr(object);
  873. edit->adjust_count_by = 1;
  874. switch (assoc_array_walk(array, ops, index_key, &result)) {
  875. case assoc_array_walk_tree_empty:
  876. /* Allocate a root node if there isn't one yet */
  877. if (!assoc_array_insert_in_empty_tree(edit))
  878. goto enomem;
  879. return edit;
  880. case assoc_array_walk_found_terminal_node:
  881. /* We found a node that doesn't have a node/shortcut pointer in
  882. * the slot corresponding to the index key that we have to
  883. * follow.
  884. */
  885. if (!assoc_array_insert_into_terminal_node(edit, ops, index_key,
  886. &result))
  887. goto enomem;
  888. return edit;
  889. case assoc_array_walk_found_wrong_shortcut:
  890. /* We found a shortcut that didn't match our key in a slot we
  891. * needed to follow.
  892. */
  893. if (!assoc_array_insert_mid_shortcut(edit, ops, &result))
  894. goto enomem;
  895. return edit;
  896. }
  897. enomem:
  898. /* Clean up after an out of memory error */
  899. pr_devel("enomem\n");
  900. assoc_array_cancel_edit(edit);
  901. return ERR_PTR(-ENOMEM);
  902. }
  903. /**
  904. * assoc_array_insert_set_object - Set the new object pointer in an edit script
  905. * @edit: The edit script to modify.
  906. * @object: The object pointer to set.
  907. *
  908. * Change the object to be inserted in an edit script. The object pointed to
  909. * by the old object is not freed. This must be done prior to applying the
  910. * script.
  911. */
void assoc_array_insert_set_object(struct assoc_array_edit *edit, void *object)
{
	/* NULL is rejected because it would denote an empty slot once the
	 * script is applied.
	 */
	BUG_ON(!object);
	edit->leaf = assoc_array_leaf_to_ptr(object);
}
/* State for collapsing a subtree's leaves into a single replacement node. */
struct assoc_array_delete_collapse_context {
	struct assoc_array_node	*node;		/* Node to copy the leaves into */
	const void		*skip_leaf;	/* The leaf being deleted - not copied */
	int			slot;		/* Next free slot in *node */
};
  922. /*
  923. * Subtree collapse to node iterator.
  924. */
  925. static int assoc_array_delete_collapse_iterator(const void *leaf,
  926. void *iterator_data)
  927. {
  928. struct assoc_array_delete_collapse_context *collapse = iterator_data;
  929. if (leaf == collapse->skip_leaf)
  930. return 0;
  931. BUG_ON(collapse->slot >= ASSOC_ARRAY_FAN_OUT);
  932. collapse->node->slots[collapse->slot++] = assoc_array_leaf_to_ptr(leaf);
  933. return 0;
  934. }
  935. /**
  936. * assoc_array_delete - Script deletion of an object from an associative array
  937. * @array: The array to search.
  938. * @ops: The operations to use.
  939. * @index_key: The key to the object.
  940. *
  941. * Precalculate and preallocate a script for the deletion of an object from an
  942. * associative array. This results in an edit script that can either be
  943. * applied or cancelled.
  944. *
  945. * The function returns a pointer to an edit script if the object was found,
  946. * NULL if the object was not found or -ENOMEM.
  947. *
  948. * The caller should lock against other modifications and must continue to hold
  949. * the lock until assoc_array_apply_edit() has been called.
  950. *
  951. * Accesses to the tree may take place concurrently with this function,
  952. * provided they hold the RCU read lock.
  953. */
struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
					    const struct assoc_array_ops *ops,
					    const void *index_key)
{
	struct assoc_array_delete_collapse_context collapse;
	struct assoc_array_walk_result result;
	struct assoc_array_node *node, *new_n0;
	struct assoc_array_edit *edit;
	struct assoc_array_ptr *ptr;
	bool has_meta;
	int slot, i;

	pr_devel("-->%s()\n", __func__);

	edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
	if (!edit)
		return ERR_PTR(-ENOMEM);
	edit->array = array;
	edit->ops = ops;
	edit->adjust_count_by = -1;

	switch (assoc_array_walk(array, ops, index_key, &result)) {
	case assoc_array_walk_found_terminal_node:
		/* We found a node that should contain the leaf we've been
		 * asked to remove - *if* it's in the tree.
		 */
		pr_devel("terminal_node\n");
		node = result.terminal_node.node;

		/* Scan the node's slots for a leaf matching the index key. */
		for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
			ptr = node->slots[slot];
			if (ptr &&
			    assoc_array_ptr_is_leaf(ptr) &&
			    ops->compare_object(assoc_array_ptr_to_leaf(ptr),
						index_key))
				goto found_leaf;
		}
		/* fall through - leaf not present, treat as not found */
	case assoc_array_walk_tree_empty:
	case assoc_array_walk_found_wrong_shortcut:
	default:
		assoc_array_cancel_edit(edit);
		pr_devel("not found\n");
		return NULL;
	}

found_leaf:
	BUG_ON(array->nr_leaves_on_tree <= 0);

	/* In the simplest form of deletion we just clear the slot and release
	 * the leaf after a suitable interval.
	 */
	edit->dead_leaf = node->slots[slot];
	edit->set[0].ptr = &node->slots[slot];
	edit->set[0].to = NULL;
	edit->adjust_count_on = node;

	/* If that concludes erasure of the last leaf, then delete the entire
	 * internal array.
	 */
	if (array->nr_leaves_on_tree == 1) {
		edit->set[1].ptr = &array->root;
		edit->set[1].to = NULL;
		edit->adjust_count_on = NULL;
		edit->excised_subtree = array->root;
		pr_devel("all gone\n");
		return edit;
	}

	/* However, we'd also like to clear up some metadata blocks if we
	 * possibly can.
	 *
	 * We go for a simple algorithm of: if this node has FAN_OUT or fewer
	 * leaves in it, then attempt to collapse it - and attempt to
	 * recursively collapse up the tree.
	 *
	 * We could also try and collapse in partially filled subtrees to take
	 * up space in this node.
	 */
	if (node->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
		struct assoc_array_node *parent, *grandparent;
		struct assoc_array_ptr *ptr;

		/* First of all, we need to know if this node has metadata so
		 * that we don't try collapsing if all the leaves are already
		 * here.
		 */
		has_meta = false;
		for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
			ptr = node->slots[i];
			if (assoc_array_ptr_is_meta(ptr)) {
				has_meta = true;
				break;
			}
		}

		pr_devel("leaves: %ld [m=%d]\n",
			 node->nr_leaves_on_branch - 1, has_meta);

		/* Look further up the tree to see if we can collapse this node
		 * into a more proximal node too.
		 */
		parent = node;
	collapse_up:
		pr_devel("collapse subtree: %ld\n", parent->nr_leaves_on_branch);

		ptr = parent->back_pointer;
		if (!ptr)
			goto do_collapse;
		/* Step over an intervening shortcut to reach the grandparent
		 * node proper.
		 */
		if (assoc_array_ptr_is_shortcut(ptr)) {
			struct assoc_array_shortcut *s = assoc_array_ptr_to_shortcut(ptr);
			ptr = s->back_pointer;
			if (!ptr)
				goto do_collapse;
		}

		grandparent = assoc_array_ptr_to_node(ptr);
		if (grandparent->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
			parent = grandparent;
			goto collapse_up;
		}

	do_collapse:
		/* There's no point collapsing if the original node has no meta
		 * pointers to discard and if we didn't merge into one of that
		 * node's ancestry.
		 */
		if (has_meta || parent != node) {
			node = parent;

			/* Create a new node to collapse into */
			new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
			if (!new_n0)
				goto enomem;
			edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);

			new_n0->back_pointer = node->back_pointer;
			new_n0->parent_slot = node->parent_slot;
			new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
			edit->adjust_count_on = new_n0;

			/* Copy every surviving leaf in the subtree into the
			 * new node, skipping the leaf being deleted.
			 */
			collapse.node = new_n0;
			collapse.skip_leaf = assoc_array_ptr_to_leaf(edit->dead_leaf);
			collapse.slot = 0;
			assoc_array_subtree_iterate(assoc_array_node_to_ptr(node),
						    node->back_pointer,
						    assoc_array_delete_collapse_iterator,
						    &collapse);
			pr_devel("collapsed %d,%lu\n", collapse.slot, new_n0->nr_leaves_on_branch);
			BUG_ON(collapse.slot != new_n0->nr_leaves_on_branch - 1);

			/* Point the relevant parent slot (or the root) at the
			 * replacement node.
			 */
			if (!node->back_pointer) {
				edit->set[1].ptr = &array->root;
			} else if (assoc_array_ptr_is_leaf(node->back_pointer)) {
				BUG();
			} else if (assoc_array_ptr_is_node(node->back_pointer)) {
				struct assoc_array_node *p =
					assoc_array_ptr_to_node(node->back_pointer);
				edit->set[1].ptr = &p->slots[node->parent_slot];
			} else if (assoc_array_ptr_is_shortcut(node->back_pointer)) {
				struct assoc_array_shortcut *s =
					assoc_array_ptr_to_shortcut(node->back_pointer);
				edit->set[1].ptr = &s->next_node;
			}
			edit->set[1].to = assoc_array_node_to_ptr(new_n0);
			edit->excised_subtree = assoc_array_node_to_ptr(node);
		}
	}

	return edit;

enomem:
	/* Clean up after an out of memory error */
	pr_devel("enomem\n");
	assoc_array_cancel_edit(edit);
	return ERR_PTR(-ENOMEM);
}
  1110. /**
  1111. * assoc_array_clear - Script deletion of all objects from an associative array
  1112. * @array: The array to clear.
  1113. * @ops: The operations to use.
  1114. *
  1115. * Precalculate and preallocate a script for the deletion of all the objects
  1116. * from an associative array. This results in an edit script that can either
  1117. * be applied or cancelled.
  1118. *
  1119. * The function returns a pointer to an edit script if there are objects to be
  1120. * deleted, NULL if there are no objects in the array or -ENOMEM.
  1121. *
  1122. * The caller should lock against other modifications and must continue to hold
  1123. * the lock until assoc_array_apply_edit() has been called.
  1124. *
  1125. * Accesses to the tree may take place concurrently with this function,
  1126. * provided they hold the RCU read lock.
  1127. */
  1128. struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
  1129. const struct assoc_array_ops *ops)
  1130. {
  1131. struct assoc_array_edit *edit;
  1132. pr_devel("-->%s()\n", __func__);
  1133. if (!array->root)
  1134. return NULL;
  1135. edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
  1136. if (!edit)
  1137. return ERR_PTR(-ENOMEM);
  1138. edit->array = array;
  1139. edit->ops = ops;
  1140. edit->set[1].ptr = &array->root;
  1141. edit->set[1].to = NULL;
  1142. edit->excised_subtree = array->root;
  1143. edit->ops_for_excised_subtree = ops;
  1144. pr_devel("all gone\n");
  1145. return edit;
  1146. }
  1147. /*
  1148. * Handle the deferred destruction after an applied edit.
  1149. */
static void assoc_array_rcu_cleanup(struct rcu_head *head)
{
	struct assoc_array_edit *edit =
		container_of(head, struct assoc_array_edit, rcu);
	int i;

	pr_devel("-->%s()\n", __func__);

	/* Release the object displaced by the edit, if any. */
	if (edit->dead_leaf)
		edit->ops->free_object(assoc_array_ptr_to_leaf(edit->dead_leaf));
	/* NOTE(review): excised_meta may hold shortcut pointers as well as
	 * node pointers; assoc_array_ptr_to_node() appears to be used here
	 * just to strip the type marking before kfree() - confirm.
	 */
	for (i = 0; i < ARRAY_SIZE(edit->excised_meta); i++)
		if (edit->excised_meta[i])
			kfree(assoc_array_ptr_to_node(edit->excised_meta[i]));

	if (edit->excised_subtree) {
		BUG_ON(assoc_array_ptr_is_leaf(edit->excised_subtree));
		/* Detach the subtree's back pointer before destruction. */
		if (assoc_array_ptr_is_node(edit->excised_subtree)) {
			struct assoc_array_node *n =
				assoc_array_ptr_to_node(edit->excised_subtree);
			n->back_pointer = NULL;
		} else {
			struct assoc_array_shortcut *s =
				assoc_array_ptr_to_shortcut(edit->excised_subtree);
			s->back_pointer = NULL;
		}
		assoc_array_destroy_subtree(edit->excised_subtree,
					    edit->ops_for_excised_subtree);
	}

	kfree(edit);
}
  1177. /**
  1178. * assoc_array_apply_edit - Apply an edit script to an associative array
  1179. * @edit: The script to apply.
  1180. *
  1181. * Apply an edit script to an associative array to effect an insertion,
  1182. * deletion or clearance. As the edit script includes preallocated memory,
  1183. * this is guaranteed not to fail.
  1184. *
  1185. * The edit script, dead objects and dead metadata will be scheduled for
  1186. * destruction after an RCU grace period to permit those doing read-only
  1187. * accesses on the array to continue to do so under the RCU read lock whilst
  1188. * the edit is taking place.
  1189. */
void assoc_array_apply_edit(struct assoc_array_edit *edit)
{
	struct assoc_array_shortcut *shortcut;
	struct assoc_array_node *node;
	struct assoc_array_ptr *ptr;
	int i;

	pr_devel("-->%s()\n", __func__);

	/* The stores are staged behind write barriers so that concurrent RCU
	 * readers never see a pointer published before the data it leads to.
	 * Do not reorder these phases.
	 */
	smp_wmb();
	if (edit->leaf_p)
		*edit->leaf_p = edit->leaf;

	/* Parent slot numbers must be in place before back pointers change. */
	smp_wmb();
	for (i = 0; i < ARRAY_SIZE(edit->set_parent_slot); i++)
		if (edit->set_parent_slot[i].p)
			*edit->set_parent_slot[i].p = edit->set_parent_slot[i].to;

	smp_wmb();
	for (i = 0; i < ARRAY_SIZE(edit->set_backpointers); i++)
		if (edit->set_backpointers[i])
			*edit->set_backpointers[i] = edit->set_backpointers_to;

	/* Finally splice the new structure into the tree proper. */
	smp_wmb();
	for (i = 0; i < ARRAY_SIZE(edit->set); i++)
		if (edit->set[i].ptr)
			*edit->set[i].ptr = edit->set[i].to;

	if (edit->array->root == NULL) {
		edit->array->nr_leaves_on_tree = 0;
	} else if (edit->adjust_count_on) {
		/* Propagate the leaf-count delta from the affected node up to
		 * the root, stepping over any intervening shortcuts.
		 */
		node = edit->adjust_count_on;
		for (;;) {
			node->nr_leaves_on_branch += edit->adjust_count_by;

			ptr = node->back_pointer;
			if (!ptr)
				break;
			if (assoc_array_ptr_is_shortcut(ptr)) {
				shortcut = assoc_array_ptr_to_shortcut(ptr);
				ptr = shortcut->back_pointer;
				if (!ptr)
					break;
			}
			BUG_ON(!assoc_array_ptr_is_node(ptr));
			node = assoc_array_ptr_to_node(ptr);
		}

		edit->array->nr_leaves_on_tree += edit->adjust_count_by;
	}

	/* Defer freeing of displaced objects and metadata until after a grace
	 * period so RCU readers can finish with them.
	 */
	call_rcu(&edit->rcu, assoc_array_rcu_cleanup);
}
  1234. /**
  1235. * assoc_array_cancel_edit - Discard an edit script.
  1236. * @edit: The script to discard.
  1237. *
  1238. * Free an edit script and all the preallocated data it holds without making
  1239. * any changes to the associative array it was intended for.
  1240. *
  1241. * NOTE! In the case of an insertion script, this does _not_ release the leaf
  1242. * that was to be inserted. That is left to the caller.
  1243. */
  1244. void assoc_array_cancel_edit(struct assoc_array_edit *edit)
  1245. {
  1246. struct assoc_array_ptr *ptr;
  1247. int i;
  1248. pr_devel("-->%s()\n", __func__);
  1249. /* Clean up after an out of memory error */
  1250. for (i = 0; i < ARRAY_SIZE(edit->new_meta); i++) {
  1251. ptr = edit->new_meta[i];
  1252. if (ptr) {
  1253. if (assoc_array_ptr_is_node(ptr))
  1254. kfree(assoc_array_ptr_to_node(ptr));
  1255. else
  1256. kfree(assoc_array_ptr_to_shortcut(ptr));
  1257. }
  1258. }
  1259. kfree(edit);
  1260. }
  1261. /**
  1262. * assoc_array_gc - Garbage collect an associative array.
  1263. * @array: The array to clean.
  1264. * @ops: The operations to use.
  1265. * @iterator: A callback function to pass judgement on each object.
  1266. * @iterator_data: Private data for the callback function.
  1267. *
  1268. * Collect garbage from an associative array and pack down the internal tree to
  1269. * save memory.
  1270. *
  1271. * The iterator function is asked to pass judgement upon each object in the
* array.  If it returns false, the object is discarded and if it returns true,
  1273. * the object is kept. If it returns true, it must increment the object's
  1274. * usage count (or whatever it needs to do to retain it) before returning.
  1275. *
  1276. * This function returns 0 if successful or -ENOMEM if out of memory. In the
  1277. * latter case, the array is not changed.
  1278. *
  1279. * The caller should lock against other modifications and must continue to hold
  1280. * the lock until assoc_array_apply_edit() has been called.
  1281. *
  1282. * Accesses to the tree may take place concurrently with this function,
  1283. * provided they hold the RCU read lock.
  1284. */
  1285. int assoc_array_gc(struct assoc_array *array,
  1286. const struct assoc_array_ops *ops,
  1287. bool (*iterator)(void *object, void *iterator_data),
  1288. void *iterator_data)
  1289. {
  1290. struct assoc_array_shortcut *shortcut, *new_s;
  1291. struct assoc_array_node *node, *new_n;
  1292. struct assoc_array_edit *edit;
  1293. struct assoc_array_ptr *cursor, *ptr;
  1294. struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
  1295. unsigned long nr_leaves_on_tree;
  1296. int keylen, slot, nr_free, next_slot, i;
  1297. pr_devel("-->%s()\n", __func__);
  1298. if (!array->root)
  1299. return 0;
  1300. edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
  1301. if (!edit)
  1302. return -ENOMEM;
  1303. edit->array = array;
  1304. edit->ops = ops;
  1305. edit->ops_for_excised_subtree = ops;
  1306. edit->set[0].ptr = &array->root;
  1307. edit->excised_subtree = array->root;
  1308. new_root = new_parent = NULL;
  1309. new_ptr_pp = &new_root;
  1310. cursor = array->root;
  1311. descend:
  1312. /* If this point is a shortcut, then we need to duplicate it and
  1313. * advance the target cursor.
  1314. */
  1315. if (assoc_array_ptr_is_shortcut(cursor)) {
  1316. shortcut = assoc_array_ptr_to_shortcut(cursor);
  1317. keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
  1318. keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
  1319. new_s = kmalloc(sizeof(struct assoc_array_shortcut) +
  1320. keylen * sizeof(unsigned long), GFP_KERNEL);
  1321. if (!new_s)
  1322. goto enomem;
  1323. pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
  1324. memcpy(new_s, shortcut, (sizeof(struct assoc_array_shortcut) +
  1325. keylen * sizeof(unsigned long)));
  1326. new_s->back_pointer = new_parent;
  1327. new_s->parent_slot = shortcut->parent_slot;
  1328. *new_ptr_pp = new_parent = assoc_array_shortcut_to_ptr(new_s);
  1329. new_ptr_pp = &new_s->next_node;
  1330. cursor = shortcut->next_node;
  1331. }
  1332. /* Duplicate the node at this position */
  1333. node = assoc_array_ptr_to_node(cursor);
  1334. new_n = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  1335. if (!new_n)
  1336. goto enomem;
  1337. pr_devel("dup node %p -> %p\n", node, new_n);
  1338. new_n->back_pointer = new_parent;
  1339. new_n->parent_slot = node->parent_slot;
  1340. *new_ptr_pp = new_parent = assoc_array_node_to_ptr(new_n);
  1341. new_ptr_pp = NULL;
  1342. slot = 0;
  1343. continue_node:
  1344. /* Filter across any leaves and gc any subtrees */
  1345. for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
  1346. ptr = node->slots[slot];
  1347. if (!ptr)
  1348. continue;
  1349. if (assoc_array_ptr_is_leaf(ptr)) {
  1350. if (iterator(assoc_array_ptr_to_leaf(ptr),
  1351. iterator_data))
  1352. /* The iterator will have done any reference
  1353. * counting on the object for us.
  1354. */
  1355. new_n->slots[slot] = ptr;
  1356. continue;
  1357. }
  1358. new_ptr_pp = &new_n->slots[slot];
  1359. cursor = ptr;
  1360. goto descend;
  1361. }
  1362. pr_devel("-- compress node %p --\n", new_n);
  1363. /* Count up the number of empty slots in this node and work out the
  1364. * subtree leaf count.
  1365. */
  1366. new_n->nr_leaves_on_branch = 0;
  1367. nr_free = 0;
  1368. for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
  1369. ptr = new_n->slots[slot];
  1370. if (!ptr)
  1371. nr_free++;
  1372. else if (assoc_array_ptr_is_leaf(ptr))
  1373. new_n->nr_leaves_on_branch++;
  1374. }
  1375. pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
	/* See what we can fold in.  For each metadata child of the node just
	 * rebuilt, if all of the child's leaves would fit into this node's
	 * free slots plus the slot the child itself occupies, hoist the
	 * leaves up and discard the child node, keeping the tree shallow.
	 */
	next_slot = 0;	/* Lowest slot that might still be free for packing */
	for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
		struct assoc_array_shortcut *s;
		struct assoc_array_node *child;
		ptr = new_n->slots[slot];
		if (!ptr || assoc_array_ptr_is_leaf(ptr))
			continue;
		/* Step over an intervening shortcut to reach the child node */
		s = NULL;
		if (assoc_array_ptr_is_shortcut(ptr)) {
			s = assoc_array_ptr_to_shortcut(ptr);
			ptr = s->next_node;
		}
		child = assoc_array_ptr_to_node(ptr);
		new_n->nr_leaves_on_branch += child->nr_leaves_on_branch;
		if (child->nr_leaves_on_branch <= nr_free + 1) {
			/* Fold the child node into this one */
			pr_devel("[%d] fold node %lu/%d [nx %d]\n",
				 slot, child->nr_leaves_on_branch, nr_free + 1,
				 next_slot);
			/* We would already have reaped an intervening shortcut
			 * on the way back up the tree.
			 */
			BUG_ON(s);
			/* Emptying the child's slot frees one more slot to
			 * pack into.
			 */
			new_n->slots[slot] = NULL;
			nr_free++;
			if (slot < next_slot)
				next_slot = slot;
			/* Repack the child's occupied slots into our free
			 * ones.
			 *
			 * NOTE(review): this assumes the child holds only
			 * leaves, but nr_leaves_on_branch also counts leaves
			 * in deeper subtrees — presumably those were folded
			 * on the way up; if not, the BUG_ON below can fire.
			 * TODO: confirm against the descent half of this
			 * function (not visible here).
			 */
			for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
				struct assoc_array_ptr *p = child->slots[i];
				if (!p)
					continue;
				BUG_ON(assoc_array_ptr_is_meta(p));
				while (new_n->slots[next_slot])
					next_slot++;
				BUG_ON(next_slot >= ASSOC_ARRAY_FAN_OUT);
				new_n->slots[next_slot++] = p;
				nr_free--;
			}
			kfree(child);
		} else {
			pr_devel("[%d] retain node %lu/%d [nx %d]\n",
				 slot, child->nr_leaves_on_branch, nr_free + 1,
				 next_slot);
		}
	}
	pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
	nr_leaves_on_tree = new_n->nr_leaves_on_branch;
	/* Excise this node if it is singly occupied by a shortcut */
	if (nr_free == ASSOC_ARRAY_FAN_OUT - 1) {
		/* Find the single occupied slot */
		for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++)
			if ((ptr = new_n->slots[slot]))
				break;
		if (assoc_array_ptr_is_meta(ptr) &&
		    assoc_array_ptr_is_shortcut(ptr)) {
			pr_devel("excise node %p with 1 shortcut\n", new_n);
			new_s = assoc_array_ptr_to_shortcut(ptr);
			new_parent = new_n->back_pointer;
			slot = new_n->parent_slot;
			kfree(new_n);
			if (!new_parent) {
				/* The excised node was the root: its sole
				 * shortcut becomes the new root.
				 */
				new_s->back_pointer = NULL;
				new_s->parent_slot = 0;
				new_root = ptr;
				goto gc_complete;
			}
			if (assoc_array_ptr_is_shortcut(new_parent)) {
				/* We can discard any preceding shortcut also */
				struct assoc_array_shortcut *s =
					assoc_array_ptr_to_shortcut(new_parent);
				pr_devel("excise preceding shortcut\n");
				/* Splice the surviving shortcut into the
				 * discarded one's place.
				 */
				new_parent = new_s->back_pointer = s->back_pointer;
				slot = new_s->parent_slot = s->parent_slot;
				kfree(s);
				if (!new_parent) {
					new_s->back_pointer = NULL;
					new_s->parent_slot = 0;
					new_root = ptr;
					goto gc_complete;
				}
			}
			/* Reattach the surviving shortcut under the parent
			 * node and continue ascending the old tree.
			 */
			new_s->back_pointer = new_parent;
			new_s->parent_slot = slot;
			new_n = assoc_array_ptr_to_node(new_parent);
			new_n->slots[slot] = ptr;
			goto ascend_old_tree;
		}
	}
	/* Excise any shortcuts we might encounter that point to nodes that
	 * only contain leaves.
	 */
	ptr = new_n->back_pointer;
	if (!ptr)
		goto gc_complete;
	if (assoc_array_ptr_is_shortcut(ptr)) {
		new_s = assoc_array_ptr_to_shortcut(ptr);
		new_parent = new_s->back_pointer;
		slot = new_s->parent_slot;
		if (new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
			struct assoc_array_node *n;
			pr_devel("excise shortcut\n");
			/* The shortcut adds no value over a node this small:
			 * link the node directly to the grandparent.
			 */
			new_n->back_pointer = new_parent;
			new_n->parent_slot = slot;
			kfree(new_s);
			if (!new_parent) {
				new_root = assoc_array_node_to_ptr(new_n);
				goto gc_complete;
			}
			n = assoc_array_ptr_to_node(new_parent);
			n->slots[slot] = assoc_array_node_to_ptr(new_n);
		}
	} else {
		new_parent = ptr;
	}
	new_n = assoc_array_ptr_to_node(new_parent);

ascend_old_tree:
	/* Step up one level in the OLD tree and resume the slot scan there;
	 * continue_node is defined earlier in this function (not visible in
	 * this view).
	 */
	ptr = node->back_pointer;
	if (assoc_array_ptr_is_shortcut(ptr)) {
		shortcut = assoc_array_ptr_to_shortcut(ptr);
		slot = shortcut->parent_slot;
		cursor = shortcut->back_pointer;
		if (!cursor)
			goto gc_complete;
	} else {
		slot = node->parent_slot;
		cursor = ptr;
	}
	BUG_ON(!cursor);
	node = assoc_array_ptr_to_node(cursor);
	slot++;
	goto continue_node;

gc_complete:
	/* Publish the rebuilt tree by applying the accumulated edit, then
	 * record the new leaf count on the array.
	 */
	edit->set[0].to = new_root;
	assoc_array_apply_edit(edit);
	array->nr_leaves_on_tree = nr_leaves_on_tree;
	return 0;

enomem:
	/* Allocation failed partway through: tear down whatever portion of
	 * the new tree was built and discard the edit script.
	 */
	pr_devel("enomem\n");
	assoc_array_destroy_subtree(new_root, edit->ops);
	kfree(edit);
	return -ENOMEM;
}