  1. /*
  2. * multiorder.c: Multi-order radix tree entry testing
  3. * Copyright (c) 2016 Intel Corporation
  4. * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
  5. * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify it
  8. * under the terms and conditions of the GNU General Public License,
  9. * version 2, as published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope it will be useful, but WITHOUT
  12. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  14. * more details.
  15. */
  16. #include <linux/radix-tree.h>
  17. #include <linux/slab.h>
  18. #include <linux/errno.h>
  19. #include "test.h"
  20. #define for_each_index(i, base, order) \
  21. for (i = base; i < base + (1 << order); i++)
/*
 * Insert one multi-order entry covering @index at @order and verify that
 * tag operations performed through any covered index behave as if applied
 * to the entry as a whole.
 */
static void __multiorder_tag_test(int index, int order)
{
	RADIX_TREE(tree, GFP_KERNEL);
	int base, err, i;

	/* our canonical entry */
	base = index & ~((1 << order) - 1);

	printf("Multiorder tag test with index %d, canonical entry %d\n",
			index, base);

	err = item_insert_order(&tree, index, order);
	assert(!err);

	/*
	 * Verify we get collisions for covered indices. We try and fail to
	 * insert an exceptional entry so we don't leak memory via
	 * item_insert_order().
	 */
	for_each_index(i, base, order) {
		err = __radix_tree_insert(&tree, i, order,
				(void *)(0xA0 | RADIX_TREE_EXCEPTIONAL_ENTRY));
		assert(err == -EEXIST);
	}

	/* both tags start out clear on every covered index */
	for_each_index(i, base, order) {
		assert(!radix_tree_tag_get(&tree, i, 0));
		assert(!radix_tree_tag_get(&tree, i, 1));
	}

	/* setting tag 0 through one index must be visible through all */
	assert(radix_tree_tag_set(&tree, index, 0));
	for_each_index(i, base, order) {
		assert(radix_tree_tag_get(&tree, i, 0));
		assert(!radix_tree_tag_get(&tree, i, 1));
	}

	/* copy tag 0 -> tag 1: the multiorder entry counts as ONE item */
	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 1);
	assert(radix_tree_tag_clear(&tree, index, 0));

	/* after the copy + clear, only tag 1 remains set */
	for_each_index(i, base, order) {
		assert(!radix_tree_tag_get(&tree, i, 0));
		assert(radix_tree_tag_get(&tree, i, 1));
	}

	/* clearing the last tag must leave the whole tree untagged */
	assert(radix_tree_tag_clear(&tree, index, 1));

	assert(!radix_tree_tagged(&tree, 0));
	assert(!radix_tree_tagged(&tree, 1));

	item_kill_tree(&tree);
}
  62. static void __multiorder_tag_test2(unsigned order, unsigned long index2)
  63. {
  64. RADIX_TREE(tree, GFP_KERNEL);
  65. unsigned long index = (1 << order);
  66. index2 += index;
  67. assert(item_insert_order(&tree, 0, order) == 0);
  68. assert(item_insert(&tree, index2) == 0);
  69. assert(radix_tree_tag_set(&tree, 0, 0));
  70. assert(radix_tree_tag_set(&tree, index2, 0));
  71. assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 2);
  72. item_kill_tree(&tree);
  73. }
  74. static void multiorder_tag_tests(void)
  75. {
  76. int i, j;
  77. /* test multi-order entry for indices 0-7 with no sibling pointers */
  78. __multiorder_tag_test(0, 3);
  79. __multiorder_tag_test(5, 3);
  80. /* test multi-order entry for indices 8-15 with no sibling pointers */
  81. __multiorder_tag_test(8, 3);
  82. __multiorder_tag_test(15, 3);
  83. /*
  84. * Our order 5 entry covers indices 0-31 in a tree with height=2.
  85. * This is broken up as follows:
  86. * 0-7: canonical entry
  87. * 8-15: sibling 1
  88. * 16-23: sibling 2
  89. * 24-31: sibling 3
  90. */
  91. __multiorder_tag_test(0, 5);
  92. __multiorder_tag_test(29, 5);
  93. /* same test, but with indices 32-63 */
  94. __multiorder_tag_test(32, 5);
  95. __multiorder_tag_test(44, 5);
  96. /*
  97. * Our order 8 entry covers indices 0-255 in a tree with height=3.
  98. * This is broken up as follows:
  99. * 0-63: canonical entry
  100. * 64-127: sibling 1
  101. * 128-191: sibling 2
  102. * 192-255: sibling 3
  103. */
  104. __multiorder_tag_test(0, 8);
  105. __multiorder_tag_test(190, 8);
  106. /* same test, but with indices 256-511 */
  107. __multiorder_tag_test(256, 8);
  108. __multiorder_tag_test(300, 8);
  109. __multiorder_tag_test(0x12345678UL, 8);
  110. for (i = 1; i < 10; i++)
  111. for (j = 0; j < (10 << i); j++)
  112. __multiorder_tag_test2(i, j);
  113. }
/*
 * Basic multiorder lookup/replace/delete coverage: an entry inserted at
 * @index with @order must be visible (and replaceable, and deletable)
 * through every index in [min, max) and absent everywhere else.
 */
static void multiorder_check(unsigned long index, int order)
{
	unsigned long i;
	unsigned long min = index & ~((1UL << order) - 1);	/* canonical index */
	unsigned long max = min + (1UL << order);		/* one past the range */
	void **slot;
	struct item *item2 = item_create(min, order);
	RADIX_TREE(tree, GFP_KERNEL);

	printf("Multiorder index %ld, order %d\n", index, order);

	assert(item_insert_order(&tree, index, order) == 0);

	/* every covered index resolves to the one inserted item */
	for (i = min; i < max; i++) {
		struct item *item = item_lookup(&tree, i);
		assert(item != 0);
		assert(item->index == index);
	}
	/* nothing outside [min, max) */
	for (i = 0; i < min; i++)
		item_check_absent(&tree, i);
	for (i = max; i < 2*max; i++)
		item_check_absent(&tree, i);
	/* single-order inserts collide with every covered index */
	for (i = min; i < max; i++)
		assert(radix_tree_insert(&tree, i, item2) == -EEXIST);

	/*
	 * Replace in place: free the old item first, then swap item2 into
	 * the slot so lookups through the whole range see the new item.
	 */
	slot = radix_tree_lookup_slot(&tree, index);
	free(*slot);
	radix_tree_replace_slot(&tree, slot, item2);
	for (i = min; i < max; i++) {
		struct item *item = item_lookup(&tree, i);
		assert(item != 0);
		assert(item->index == min);
	}

	/* deleting via the canonical index removes the whole range */
	assert(item_delete(&tree, min) != 0);

	for (i = 0; i < 2*max; i++)
		item_check_absent(&tree, i);
}
  147. static void multiorder_shrink(unsigned long index, int order)
  148. {
  149. unsigned long i;
  150. unsigned long max = 1 << order;
  151. RADIX_TREE(tree, GFP_KERNEL);
  152. struct radix_tree_node *node;
  153. printf("Multiorder shrink index %ld, order %d\n", index, order);
  154. assert(item_insert_order(&tree, 0, order) == 0);
  155. node = tree.rnode;
  156. assert(item_insert(&tree, index) == 0);
  157. assert(node != tree.rnode);
  158. assert(item_delete(&tree, index) != 0);
  159. assert(node == tree.rnode);
  160. for (i = 0; i < max; i++) {
  161. struct item *item = item_lookup(&tree, i);
  162. assert(item != 0);
  163. assert(item->index == 0);
  164. }
  165. for (i = max; i < 2*max; i++)
  166. item_check_absent(&tree, i);
  167. if (!item_delete(&tree, 0)) {
  168. printf("failed to delete index %ld (order %d)\n", index, order); abort();
  169. }
  170. for (i = 0; i < 2*max; i++)
  171. item_check_absent(&tree, i);
  172. }
/*
 * Regression test: a multiorder insert into a tree that already holds a
 * tagged single-slot entry must not corrupt the tree.  The exact call
 * sequence below reproduced the original bug.
 */
static void multiorder_insert_bug(void)
{
	RADIX_TREE(tree, GFP_KERNEL);

	item_insert(&tree, 0);
	radix_tree_tag_set(&tree, 0, 0);
	item_insert_order(&tree, 3 << 6, 6);

	item_kill_tree(&tree);
}
/*
 * Walk the tree from every possible starting index and check the
 * iterator reports each multiorder entry exactly once, at the right
 * shift, with index/order matching what was inserted.
 */
void multiorder_iteration(void)
{
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_iter iter;
	void **slot;
	int i, j, err;

	printf("Multiorder iteration test\n");

#define NUM_ENTRIES 11
	int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
	int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7};

	for (i = 0; i < NUM_ENTRIES; i++) {
		err = item_insert_order(&tree, index[i], order[i]);
		assert(!err);
	}

	for (j = 0; j < 256; j++) {
		/*
		 * Find the first entry whose covered range ends at or
		 * after start index j; iteration must begin there.
		 */
		for (i = 0; i < NUM_ENTRIES; i++)
			if (j <= (index[i] | ((1 << order[i]) - 1)))
				break;

		radix_tree_for_each_slot(slot, &tree, &iter, j) {
			/* shift the iterator should report for this order */
			int height = order[i] / RADIX_TREE_MAP_SHIFT;
			int shift = height * RADIX_TREE_MAP_SHIFT;
			/* low bits that may differ within one entry */
			unsigned long mask = (1UL << order[i]) - 1;
			struct item *item = *slot;

			assert((iter.index | mask) == (index[i] | mask));
			assert(iter.shift == shift);
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (index[i] | mask));
			assert(item->order == order[i]);
			i++;
		}
	}

	item_kill_tree(&tree);
}
/*
 * Like multiorder_iteration(), but walking only tagged entries: a
 * subset of the inserted entries carries tag 1, and iteration from
 * every start index must visit exactly that subset in order.
 */
void multiorder_tagged_iteration(void)
{
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_iter iter;
	void **slot;
	int i, j;

	printf("Multiorder tagged iteration test\n");

#define MT_NUM_ENTRIES 9
	int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
	int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7};

#define TAG_ENTRIES 7
	/* the subset of index[] that will carry tag 1 */
	int tag_index[TAG_ENTRIES] = {0, 4, 16, 40, 64, 72, 128};

	for (i = 0; i < MT_NUM_ENTRIES; i++)
		assert(!item_insert_order(&tree, index[i], order[i]));

	assert(!radix_tree_tagged(&tree, 1));

	for (i = 0; i < TAG_ENTRIES; i++)
		assert(radix_tree_tag_set(&tree, tag_index[i], 1));

	for (j = 0; j < 256; j++) {
		int k;

		/*
		 * Find the first tagged entry whose range ends at or after
		 * start index j; k maps tag_index[i] back into index[]/
		 * order[] so we can look up the entry's order.
		 */
		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		radix_tree_for_each_tagged(slot, &tree, &iter, j, 1) {
			unsigned long mask;
			struct item *item = *slot;
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1UL << order[k]) - 1;

			assert((iter.index | mask) == (tag_index[i] | mask));
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	/* copy tag 1 to tag 2, then repeat the walk on tag 2 */
	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, TAG_ENTRIES, 1, 2) ==
				TAG_ENTRIES);

	for (j = 0; j < 256; j++) {
		int mask, k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		radix_tree_for_each_tagged(slot, &tree, &iter, j, 2) {
			struct item *item = *slot;
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1 << order[k]) - 1;

			assert((iter.index | mask) == (tag_index[i] | mask));
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	/* copy tag 1 to tag 0 starting from index 1; index 0 is skipped
	 * but the multiorder entry covering it must still be counted once */
	assert(tag_tagged_items(&tree, NULL, 1, ~0UL, MT_NUM_ENTRIES * 2, 1, 0)
			== TAG_ENTRIES);

	i = 0;
	radix_tree_for_each_tagged(slot, &tree, &iter, 0, 0) {
		assert(iter.index == tag_index[i]);
		i++;
	}

	item_kill_tree(&tree);
}
  283. static void multiorder_join1(unsigned long index,
  284. unsigned order1, unsigned order2)
  285. {
  286. unsigned long loc;
  287. void *item, *item2 = item_create(index + 1, order1);
  288. RADIX_TREE(tree, GFP_KERNEL);
  289. item_insert_order(&tree, index, order2);
  290. item = radix_tree_lookup(&tree, index);
  291. radix_tree_join(&tree, index + 1, order1, item2);
  292. loc = find_item(&tree, item);
  293. if (loc == -1)
  294. free(item);
  295. item = radix_tree_lookup(&tree, index + 1);
  296. assert(item == item2);
  297. item_kill_tree(&tree);
  298. }
  299. static void multiorder_join2(unsigned order1, unsigned order2)
  300. {
  301. RADIX_TREE(tree, GFP_KERNEL);
  302. struct radix_tree_node *node;
  303. void *item1 = item_create(0, order1);
  304. void *item2;
  305. item_insert_order(&tree, 0, order2);
  306. radix_tree_insert(&tree, 1 << order2, (void *)0x12UL);
  307. item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
  308. assert(item2 == (void *)0x12UL);
  309. assert(node->exceptional == 1);
  310. radix_tree_join(&tree, 0, order1, item1);
  311. item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
  312. assert(item2 == item1);
  313. assert(node->exceptional == 0);
  314. item_kill_tree(&tree);
  315. }
  316. /*
  317. * This test revealed an accounting bug for exceptional entries at one point.
  318. * Nodes were being freed back into the pool with an elevated exception count
  319. * by radix_tree_join() and then radix_tree_split() was failing to zero the
  320. * count of exceptional entries.
  321. */
  322. static void multiorder_join3(unsigned int order)
  323. {
  324. RADIX_TREE(tree, GFP_KERNEL);
  325. struct radix_tree_node *node;
  326. void **slot;
  327. struct radix_tree_iter iter;
  328. unsigned long i;
  329. for (i = 0; i < (1 << order); i++) {
  330. radix_tree_insert(&tree, i, (void *)0x12UL);
  331. }
  332. radix_tree_join(&tree, 0, order, (void *)0x16UL);
  333. rcu_barrier();
  334. radix_tree_split(&tree, 0, 0);
  335. radix_tree_for_each_slot(slot, &tree, &iter, 0) {
  336. radix_tree_iter_replace(&tree, &iter, slot, (void *)0x12UL);
  337. }
  338. __radix_tree_lookup(&tree, 0, &node, NULL);
  339. assert(node->exceptional == node->count);
  340. item_kill_tree(&tree);
  341. }
  342. static void multiorder_join(void)
  343. {
  344. int i, j, idx;
  345. for (idx = 0; idx < 1024; idx = idx * 2 + 3) {
  346. for (i = 1; i < 15; i++) {
  347. for (j = 0; j < i; j++) {
  348. multiorder_join1(idx, i, j);
  349. }
  350. }
  351. }
  352. for (i = 1; i < 15; i++) {
  353. for (j = 0; j < i; j++) {
  354. multiorder_join2(i, j);
  355. }
  356. }
  357. for (i = 3; i < 10; i++) {
  358. multiorder_join3(i);
  359. }
  360. }
/*
 * Report (without failing) if a split left preloaded nodes unused or
 * allocated more nodes than were preloaded.  @alloc is the allocation
 * count taken right after the preload.
 */
static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
{
	struct radix_tree_preload *rtp = &radix_tree_preloads;
	if (rtp->nr != 0)
		printf("split(%u %u) remaining %u\n", old_order, new_order,
							rtp->nr);
	/*
	 * Can't check for equality here as some nodes may have been
	 * RCU-freed while we ran. But we should never finish with more
	 * nodes allocated since they should have all been preloaded.
	 */
	if (nr_allocated > alloc)
		printf("split(%u %u) allocated %u %u\n", old_order, new_order,
							alloc, nr_allocated);
}
/*
 * Split an order-@old_order item into order-@new_order pieces using a
 * GFP_ATOMIC tree, so every node must come from the preload cache;
 * check_mem() verifies no extra allocations happened.
 */
static void __multiorder_split(int old_order, int new_order)
{
	RADIX_TREE(tree, GFP_ATOMIC);
	void **slot;
	struct radix_tree_iter iter;
	unsigned alloc;

	/* the insert itself may allocate; preload for it separately */
	radix_tree_preload(GFP_KERNEL);
	assert(item_insert_order(&tree, 0, old_order) == 0);
	radix_tree_preload_end();

	/* Wipe out the preloaded cache or it'll confuse check_mem() */
	radix_tree_cpu_dead(0);

	radix_tree_tag_set(&tree, 0, 2);

	/* preload for the split, snapshot the count, then split */
	radix_tree_split_preload(old_order, new_order, GFP_KERNEL);
	alloc = nr_allocated;
	radix_tree_split(&tree, 0, new_order);
	check_mem(old_order, new_order, alloc);
	/* fill the split-out slots with real items */
	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		radix_tree_iter_replace(&tree, &iter, slot,
					item_create(iter.index, new_order));
	}
	/* matches radix_tree_split_preload() above */
	radix_tree_preload_end();

	item_kill_tree(&tree);
}
/*
 * Split a multiorder exceptional entry and replace the pieces with
 * normal items: the node's exceptional count must drop to zero.
 */
static void __multiorder_split2(int old_order, int new_order)
{
	RADIX_TREE(tree, GFP_KERNEL);
	void **slot;
	struct radix_tree_iter iter;
	struct radix_tree_node *node;
	void *item;

	/* 0x12 has the low bit set: an exceptional (value) entry */
	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item == (void *)0x12);
	assert(node->exceptional > 0);

	radix_tree_split(&tree, 0, new_order);
	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		radix_tree_iter_replace(&tree, &iter, slot,
					item_create(iter.index, new_order));
	}

	/* all pieces are normal pointers now; accounting must agree */
	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item != (void *)0x12);
	assert(node->exceptional == 0);

	item_kill_tree(&tree);
}
/*
 * Split a multiorder exceptional entry and replace the pieces with
 * other exceptional entries (or NULL): the exceptional accounting must
 * stay correct at every level of the tree.
 */
static void __multiorder_split3(int old_order, int new_order)
{
	RADIX_TREE(tree, GFP_KERNEL);
	void **slot;
	struct radix_tree_iter iter;
	struct radix_tree_node *node;
	void *item;

	/* phase 1: replace every split piece with an exceptional entry */
	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item == (void *)0x12);
	assert(node->exceptional > 0);

	radix_tree_split(&tree, 0, new_order);
	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		radix_tree_iter_replace(&tree, &iter, slot, (void *)0x16);
	}

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item == (void *)0x16);
	assert(node->exceptional > 0);

	item_kill_tree(&tree);

	/* phase 2: keep only one piece, NULL out all the others */
	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);

	item = __radix_tree_lookup(&tree, 0, &node, NULL);
	assert(item == (void *)0x12);
	assert(node->exceptional > 0);

	radix_tree_split(&tree, 0, new_order);
	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		if (iter.index == (1 << new_order))
			radix_tree_iter_replace(&tree, &iter, slot,
						(void *)0x16);
		else
			radix_tree_iter_replace(&tree, &iter, slot, NULL);
	}

	/* the surviving node holds only exceptional entries ... */
	item = __radix_tree_lookup(&tree, 1 << new_order, &node, NULL);
	assert(item == (void *)0x16);
	assert(node->count == node->exceptional);
	/* ... and every ancestor holds exactly one child, none exceptional */
	do {
		node = node->parent;
		if (!node)
			break;
		assert(node->count == 1);
		assert(node->exceptional == 0);
	} while (1);

	item_kill_tree(&tree);
}
  463. static void multiorder_split(void)
  464. {
  465. int i, j;
  466. for (i = 3; i < 11; i++)
  467. for (j = 0; j < i; j++) {
  468. __multiorder_split(i, j);
  469. __multiorder_split2(i, j);
  470. __multiorder_split3(i, j);
  471. }
  472. }
/*
 * Exceptional-entry accounting for multiorder entries: an order-5
 * exceptional entry occupies two slots (canonical + sibling) but counts
 * as one exceptional entry, so count == exceptional * 2.  Both deletion
 * and NULL-replacement must drop the count back to zero.
 */
static void multiorder_account(void)
{
	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_node *node;
	void **slot;

	item_insert_order(&tree, 0, 5);
	__radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);

	__radix_tree_lookup(&tree, 0, &node, NULL);
	assert(node->count == node->exceptional * 2);
	/* accounting after delete */
	radix_tree_delete(&tree, 1 << 5);
	assert(node->exceptional == 0);

	/* accounting after replacing the slot with NULL */
	__radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);
	__radix_tree_lookup(&tree, 1 << 5, &node, &slot);
	assert(node->count == node->exceptional * 2);
	__radix_tree_replace(&tree, node, slot, NULL, NULL, NULL);
	assert(node->exceptional == 0);

	item_kill_tree(&tree);
}
  491. void multiorder_checks(void)
  492. {
  493. int i;
  494. for (i = 0; i < 20; i++) {
  495. multiorder_check(200, i);
  496. multiorder_check(0, i);
  497. multiorder_check((1UL << i) + 1, i);
  498. }
  499. for (i = 0; i < 15; i++)
  500. multiorder_shrink((1UL << (i + RADIX_TREE_MAP_SHIFT)), i);
  501. multiorder_insert_bug();
  502. multiorder_tag_tests();
  503. multiorder_iteration();
  504. multiorder_tagged_iteration();
  505. multiorder_join();
  506. multiorder_split();
  507. multiorder_account();
  508. radix_tree_cpu_dead(0);
  509. }