usnic_uiom_interval_tree.c

#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/list_sort.h>

#include <linux/interval_tree_generic.h>

#include "usnic_uiom_interval_tree.h"
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
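
/*
 * Helper macros: MAKE_NODE allocates a fresh interval node and jumps to the
 * caller-supplied err_out label on allocation failure; MAKE_NODE_AND_APPEND
 * additionally queues the new node on the given list. FLAGS_EQUAL compares
 * two flag words under a mask.
 */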
#define MAKE_NODE(node, start, end, ref_cnt, flags, err, err_out) \
	do { \
		node = usnic_uiom_interval_node_alloc(start, \
				end, ref_cnt, flags); \
		if (!node) { \
			err = -ENOMEM; \
			goto err_out; \
		} \
	} while (0)

#define MARK_FOR_ADD(node, list) (list_add_tail(&node->link, list))

#define MAKE_NODE_AND_APPEND(node, start, end, ref_cnt, flags, err, \
				err_out, list) \
	do { \
		MAKE_NODE(node, start, end, \
				ref_cnt, flags, err, \
				err_out); \
		MARK_FOR_ADD(node, list); \
	} while (0)
#define FLAGS_EQUAL(flags1, flags2, mask) \
	(((flags1) & (mask)) == ((flags2) & (mask)))
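
/*
 * Allocate and initialize one interval node. GFP_ATOMIC keeps the
 * allocation safe in atomic context, at the cost of a higher failure
 * rate than GFP_KERNEL.
 */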
static struct usnic_uiom_interval_node *
usnic_uiom_interval_node_alloc(long int start, long int last, int ref_cnt,
				int flags)
{
	struct usnic_uiom_interval_node *interval = kzalloc(sizeof(*interval),
							GFP_ATOMIC);
	if (!interval)
		return NULL;

	interval->start = start;
	interval->last = last;
	interval->flags = flags;
	interval->ref_cnt = ref_cnt;

	return interval;
}
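
/* list_sort() comparator: orders nodes by ascending interval start. */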
static int interval_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct usnic_uiom_interval_node *node_a, *node_b;

	node_a = list_entry(a, struct usnic_uiom_interval_node, link);
	node_b = list_entry(b, struct usnic_uiom_interval_node, link);

	/*
	 * Compare explicitly rather than returning
	 * node_a->start - node_b->start: the subtraction would be
	 * truncated from long to int.
	 */
	if (node_a->start < node_b->start)
		return -1;
	else if (node_a->start > node_b->start)
		return 1;

	return 0;
}
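
/*
 * Collect every node in root that overlaps [start, last] onto *list and
 * sort the result by interval start, so callers can walk the overlaps
 * left to right.
 */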
static void
find_intervals_intersection_sorted(struct rb_root *root, unsigned long start,
					unsigned long last,
					struct list_head *list)
{
	struct usnic_uiom_interval_node *node;

	INIT_LIST_HEAD(list);

	for (node = usnic_uiom_interval_tree_iter_first(root, start, last);
			node;
			node = usnic_uiom_interval_tree_iter_next(node, start,
									last))
		list_add_tail(&node->link, list);

	list_sort(NULL, list, interval_cmp);
}
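
/*
 * Build, on diff_set, newly allocated nodes covering each sub-range of
 * [start, last] that is either not present in root at all or present
 * with flags that differ under flag_mask. On allocation failure the
 * partial diff_set is freed and -ENOMEM is returned.
 */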
int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last,
					int flags, int flag_mask,
					struct rb_root *root,
					struct list_head *diff_set)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	int err = 0;
	long int pivot = start;
	LIST_HEAD(intersection_set);

	INIT_LIST_HEAD(diff_set);
	find_intervals_intersection_sorted(root, start, last,
						&intersection_set);

	list_for_each_entry(interval, &intersection_set, link) {
		if (pivot < interval->start) {
			MAKE_NODE_AND_APPEND(tmp, pivot, interval->start - 1,
						1, flags, err, err_out,
						diff_set);
			pivot = interval->start;
		}

		/*
		 * Invariant: Set [start, pivot] is either in diff_set or root,
		 * but not in both.
		 */

		if (pivot > interval->last) {
			continue;
		} else if (pivot <= interval->last &&
				FLAGS_EQUAL(interval->flags, flags,
						flag_mask)) {
			pivot = interval->last + 1;
		}
	}

	if (pivot <= last)
		MAKE_NODE_AND_APPEND(tmp, pivot, last, 1, flags, err, err_out,
					diff_set);

	return 0;

err_out:
	list_for_each_entry_safe(interval, tmp, diff_set, link) {
		list_del(&interval->link);
		kfree(interval);
	}

	return err;
}
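
/*
 * Free every node on an interval list, e.g. one filled in by
 * usnic_uiom_get_intervals_diff() or usnic_uiom_remove_interval().
 */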
void usnic_uiom_put_interval_set(struct list_head *intervals)
{
	struct usnic_uiom_interval_node *interval, *tmp;

	list_for_each_entry_safe(interval, tmp, intervals, link)
		kfree(interval);
}
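
/*
 * Insert [start, last] with the given flags, reference-counting overlaps:
 * each overlapped portion is rebuilt with ref_cnt + 1 and the OR of the
 * old and new flags, while the non-overlapped remainders keep their
 * original ref_cnt and flags. Replacement nodes are staged on to_add
 * first, so the tree is only modified after every allocation has
 * succeeded.
 */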
int usnic_uiom_insert_interval(struct rb_root *root, unsigned long start,
				unsigned long last, int flags)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	unsigned long istart, ilast;
	int iref_cnt, iflags;
	unsigned long lpivot = start;
	int err = 0;
	LIST_HEAD(to_add);
	LIST_HEAD(intersection_set);

	find_intervals_intersection_sorted(root, start, last,
						&intersection_set);

	list_for_each_entry(interval, &intersection_set, link) {
		/*
		 * Invariant: lpivot is the left edge of the next interval
		 * to be inserted.
		 */
		istart = interval->start;
		ilast = interval->last;
		iref_cnt = interval->ref_cnt;
		iflags = interval->flags;

		if (istart < lpivot) {
			MAKE_NODE_AND_APPEND(tmp, istart, lpivot - 1, iref_cnt,
						iflags, err, err_out, &to_add);
		} else if (istart > lpivot) {
			MAKE_NODE_AND_APPEND(tmp, lpivot, istart - 1, 1, flags,
						err, err_out, &to_add);
			lpivot = istart;
		} else {
			lpivot = istart;
		}

		if (ilast > last) {
			MAKE_NODE_AND_APPEND(tmp, lpivot, last, iref_cnt + 1,
						iflags | flags, err, err_out,
						&to_add);
			MAKE_NODE_AND_APPEND(tmp, last + 1, ilast, iref_cnt,
						iflags, err, err_out, &to_add);
		} else {
			MAKE_NODE_AND_APPEND(tmp, lpivot, ilast, iref_cnt + 1,
						iflags | flags, err, err_out,
						&to_add);
		}

		lpivot = ilast + 1;
	}

	if (lpivot <= last)
		MAKE_NODE_AND_APPEND(tmp, lpivot, last, 1, flags, err, err_out,
					&to_add);

	list_for_each_entry_safe(interval, tmp, &intersection_set, link) {
		usnic_uiom_interval_tree_remove(interval, root);
		kfree(interval);
	}

	list_for_each_entry(interval, &to_add, link)
		usnic_uiom_interval_tree_insert(interval, root);

	return 0;

err_out:
	list_for_each_entry_safe(interval, tmp, &to_add, link)
		kfree(interval);

	return err;
}
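
/*
 * Drop one reference from every interval overlapping [start, last].
 * Nodes whose ref_cnt reaches zero are unlinked from the tree and
 * collected on *removed for the caller to free, e.g. with
 * usnic_uiom_put_interval_set().
 */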
void usnic_uiom_remove_interval(struct rb_root *root, unsigned long start,
				unsigned long last, struct list_head *removed)
{
	struct usnic_uiom_interval_node *interval;

	for (interval = usnic_uiom_interval_tree_iter_first(root, start, last);
			interval;
			interval = usnic_uiom_interval_tree_iter_next(interval,
									start,
									last)) {
		if (--interval->ref_cnt == 0)
			list_add_tail(&interval->link, removed);
	}

	list_for_each_entry(interval, removed, link)
		usnic_uiom_interval_tree_remove(interval, root);
}
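
/*
 * Instantiate the generic interval tree for usnic_uiom_interval_node:
 * this generates the usnic_uiom_interval_tree_{insert,remove,iter_first,
 * iter_next} helpers used above, keyed on START()/LAST() and caching the
 * subtree maximum in __subtree_last.
 */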
INTERVAL_TREE_DEFINE(struct usnic_uiom_interval_node, rb,
			unsigned long, __subtree_last,
			START, LAST, , usnic_uiom_interval_tree)
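
/*
 * A minimal usage sketch (hypothetical caller, not part of this file;
 * the range and flag values are illustrative):
 *
 *	struct rb_root root = RB_ROOT;
 *	LIST_HEAD(removed);
 *	int err;
 *
 *	err = usnic_uiom_insert_interval(&root, 0x1000, 0x1fff, IOMMU_READ);
 *	if (err)
 *		return err;
 *
 *	usnic_uiom_remove_interval(&root, 0x1000, 0x1fff, &removed);
 *	usnic_uiom_put_interval_set(&removed);
 */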