/*
 * multiorder.c: Multi-order radix tree entry testing
 * Copyright (c) 2016 Intel Corporation
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <pthread.h>

#include "test.h"

static int item_insert_order(struct xarray *xa, unsigned long index,
			unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	struct item *item = item_create(index, order);

	/* Retry the store whenever xas_nomem() can allocate the missing memory. */
	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	if (!xas_error(&xas))
		return 0;

	free(item);
	return xas_error(&xas);
}

void multiorder_iteration(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	struct item *item;
	int i, j, err;

#define NUM_ENTRIES 11
	int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
	int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7};

	printv(1, "Multiorder iteration test\n");

	for (i = 0; i < NUM_ENTRIES; i++) {
		err = item_insert_order(xa, index[i], order[i]);
		assert(!err);
	}

	for (j = 0; j < 256; j++) {
		/* Find the first entry whose range ends at or after index j. */
		for (i = 0; i < NUM_ENTRIES; i++)
			if (j <= (index[i] | ((1 << order[i]) - 1)))
				break;

		xas_set(&xas, j);
		xas_for_each(&xas, item, ULONG_MAX) {
			int height = order[i] / XA_CHUNK_SHIFT;
			int shift = height * XA_CHUNK_SHIFT;
			unsigned long mask = (1UL << order[i]) - 1;

			assert((xas.xa_index | mask) == (index[i] | mask));
			assert(xas.xa_node->shift == shift);
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (index[i] | mask));
			assert(item->order == order[i]);
			i++;
		}
	}

	item_kill_tree(xa);
}

void multiorder_tagged_iteration(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	struct item *item;
	int i, j;

#define MT_NUM_ENTRIES 9
	int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
	int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7};

#define TAG_ENTRIES 7
	int tag_index[TAG_ENTRIES] = {0, 4, 16, 40, 64, 72, 128};

	printv(1, "Multiorder tagged iteration test\n");

	for (i = 0; i < MT_NUM_ENTRIES; i++)
		assert(!item_insert_order(xa, index[i], order[i]));

	assert(!xa_marked(xa, XA_MARK_1));

	for (i = 0; i < TAG_ENTRIES; i++)
		xa_set_mark(xa, tag_index[i], XA_MARK_1);

	for (j = 0; j < 256; j++) {
		int k;

		/* Find the first tagged entry whose range ends at or after index j. */
		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		xas_set(&xas, j);
		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_1) {
			unsigned long mask;

			/* Skip forward to the entry that holds tag_index[i]. */
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1UL << order[k]) - 1;

			assert((xas.xa_index | mask) == (tag_index[i] | mask));
			assert(!xa_is_internal(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	assert(tag_tagged_items(xa, 0, ULONG_MAX, TAG_ENTRIES, XA_MARK_1,
				XA_MARK_2) == TAG_ENTRIES);

	for (j = 0; j < 256; j++) {
		int mask, k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		xas_set(&xas, j);
		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_2) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1 << order[k]) - 1;

			assert((xas.xa_index | mask) == (tag_index[i] | mask));
			assert(!xa_is_internal(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	assert(tag_tagged_items(xa, 1, ULONG_MAX, MT_NUM_ENTRIES * 2, XA_MARK_1,
					XA_MARK_0) == TAG_ENTRIES);

	/* The copied marks should identify exactly the originally tagged entries. */
	i = 0;
	xas_set(&xas, 0);
	xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_0) {
		assert(xas.xa_index == tag_index[i]);
		i++;
	}
	assert(i == TAG_ENTRIES);

	item_kill_tree(xa);
}

bool stop_iteration = false;

static void *creator_func(void *ptr)
{
	/* 'order' is set up to ensure we have sibling entries */
	unsigned int order = RADIX_TREE_MAP_SHIFT - 1;
	struct radix_tree_root *tree = ptr;
	int i;

	for (i = 0; i < 10000; i++) {
		item_insert_order(tree, 0, order);
		item_delete_rcu(tree, 0);
	}

	stop_iteration = true;
	return NULL;
}

static void *iterator_func(void *ptr)
{
	XA_STATE(xas, ptr, 0);
	struct item *item;

	while (!stop_iteration) {
		rcu_read_lock();
		xas_for_each(&xas, item, ULONG_MAX) {
			if (xas_retry(&xas, item))
				continue;

			item_sanity(item, xas.xa_index);
		}
		rcu_read_unlock();
	}

	return NULL;
}

static void multiorder_iteration_race(struct xarray *xa)
{
	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
	pthread_t worker_thread[num_threads];
	int i;

	/* One creator thread inserts and deletes while the rest iterate under RCU. */
	pthread_create(&worker_thread[0], NULL, &creator_func, xa);
	for (i = 1; i < num_threads; i++)
		pthread_create(&worker_thread[i], NULL, &iterator_func, xa);

	for (i = 0; i < num_threads; i++)
		pthread_join(worker_thread[i], NULL);

	item_kill_tree(xa);
}

static DEFINE_XARRAY(array);

void multiorder_checks(void)
{
	multiorder_iteration(&array);
	multiorder_tagged_iteration(&array);
	multiorder_iteration_race(&array);

	radix_tree_cpu_dead(0);
}

int __weak main(void)
{
	radix_tree_init();
	multiorder_checks();
	return 0;
}