@@ -7,6 +7,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include "ulist.h"
+#include "ctree.h"
 
 /*
  * ulist is a generic data structure to hold a collection of unique u64
@@ -14,10 +15,6 @@
  * enumerating it.
  * It is possible to store an auxiliary value along with the key.
  *
- * The implementation is preliminary and can probably be sped up
- * significantly. A first step would be to store the values in an rbtree
- * as soon as ULIST_SIZE is exceeded.
- *
  * A sample usage for ulists is the enumeration of directed graphs without
  * visiting a node twice. The pseudo-code could look like this:
  *
@@ -50,10 +47,9 @@
  */
 void ulist_init(struct ulist *ulist)
 {
-	ulist->nnodes = 0;
-	ulist->nodes = ulist->int_nodes;
-	ulist->nodes_alloced = ULIST_SIZE;
+	INIT_LIST_HEAD(&ulist->nodes);
 	ulist->root = RB_ROOT;
+	ulist->nnodes = 0;
 }
 EXPORT_SYMBOL(ulist_init);
 
@@ -66,14 +62,14 @@ EXPORT_SYMBOL(ulist_init);
  */
 void ulist_fini(struct ulist *ulist)
 {
-	/*
-	 * The first ULIST_SIZE elements are stored inline in struct ulist.
-	 * Only if more elements are alocated they need to be freed.
-	 */
-	if (ulist->nodes_alloced > ULIST_SIZE)
-		kfree(ulist->nodes);
-	ulist->nodes_alloced = 0;	/* in case ulist_fini is called twice */
+	struct ulist_node *node;
+	struct ulist_node *next;
+
+	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
+		kfree(node);
+	}
 	ulist->root = RB_ROOT;
+	INIT_LIST_HEAD(&ulist->nodes);
 }
 EXPORT_SYMBOL(ulist_fini);
 
@@ -192,57 +188,29 @@ int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
 int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
 		    u64 *old_aux, gfp_t gfp_mask)
 {
-	int ret = 0;
-	struct ulist_node *node = NULL;
+	int ret;
+	struct ulist_node *node;
+
 	node = ulist_rbtree_search(ulist, val);
 	if (node) {
 		if (old_aux)
 			*old_aux = node->aux;
 		return 0;
 	}
+	node = kmalloc(sizeof(*node), gfp_mask);
+	if (!node)
+		return -ENOMEM;
 
-	if (ulist->nnodes >= ulist->nodes_alloced) {
-		u64 new_alloced = ulist->nodes_alloced + 128;
-		struct ulist_node *new_nodes;
-		void *old = NULL;
-		int i;
-
-		/*
-		 * if nodes_alloced == ULIST_SIZE no memory has been allocated
-		 * yet, so pass NULL to krealloc
-		 */
-		if (ulist->nodes_alloced > ULIST_SIZE)
-			old = ulist->nodes;
+	node->val = val;
+	node->aux = aux;
+#ifdef CONFIG_BTRFS_DEBUG
+	node->seqnum = ulist->nnodes;
+#endif
 
-		new_nodes = krealloc(old, sizeof(*new_nodes) * new_alloced,
-				     gfp_mask);
-		if (!new_nodes)
-			return -ENOMEM;
-
-		if (!old)
-			memcpy(new_nodes, ulist->int_nodes,
-			       sizeof(ulist->int_nodes));
-
-		ulist->nodes = new_nodes;
-		ulist->nodes_alloced = new_alloced;
-
-		/*
-		 * krealloc actually uses memcpy, which does not copy rb_node
-		 * pointers, so we have to do it ourselves. Otherwise we may
-		 * be bitten by crashes.
-		 */
-		ulist->root = RB_ROOT;
-		for (i = 0; i < ulist->nnodes; i++) {
-			ret = ulist_rbtree_insert(ulist, &ulist->nodes[i]);
-			if (ret < 0)
-				return ret;
-		}
-	}
-	ulist->nodes[ulist->nnodes].val = val;
-	ulist->nodes[ulist->nnodes].aux = aux;
-	ret = ulist_rbtree_insert(ulist, &ulist->nodes[ulist->nnodes]);
-	BUG_ON(ret);
-	++ulist->nnodes;
+	ret = ulist_rbtree_insert(ulist, node);
+	ASSERT(!ret);
+	list_add_tail(&node->list, &ulist->nodes);
+	ulist->nnodes++;
 
 	return 1;
 }
@@ -266,11 +234,26 @@ EXPORT_SYMBOL(ulist_add);
  */
 struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
 {
-	if (ulist->nnodes == 0)
+	struct ulist_node *node;
+
+	if (list_empty(&ulist->nodes))
 		return NULL;
-	if (uiter->i < 0 || uiter->i >= ulist->nnodes)
+	if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
 		return NULL;
-
-	return &ulist->nodes[uiter->i++];
+	if (uiter->cur_list) {
+		uiter->cur_list = uiter->cur_list->next;
+	} else {
+		uiter->cur_list = ulist->nodes.next;
+#ifdef CONFIG_BTRFS_DEBUG
+		uiter->i = 0;
+#endif
+	}
+	node = list_entry(uiter->cur_list, struct ulist_node, list);
+#ifdef CONFIG_BTRFS_DEBUG
+	ASSERT(node->seqnum == uiter->i);
+	ASSERT(uiter->i >= 0 && uiter->i < ulist->nnodes);
+	uiter->i++;
+#endif
+	return node;
 }
 EXPORT_SYMBOL(ulist_next);
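
For reference, callers are not affected by this rework: ulist_next() still returns entries in insertion order (now by walking the list rather than an array index), and duplicates are still rejected via the rbtree. Below is a minimal usage sketch, not part of the patch, assuming the companion ulist.h change leaves ulist_alloc()/ulist_free()/ulist_add() as they are today and makes ULIST_ITER_INIT() reset uiter->cur_list to NULL:

/* Hypothetical example; would live in a file that includes "ulist.h". */
static int ulist_usage_example(void)
{
	struct ulist *ulist;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	ulist = ulist_alloc(GFP_NOFS);
	if (!ulist)
		return -ENOMEM;

	ret = ulist_add(ulist, 42, 0, GFP_NOFS);	/* new entry, returns 1 */
	if (ret < 0)
		goto out;
	ret = ulist_add(ulist, 42, 0, GFP_NOFS);	/* duplicate, returns 0 */
	if (ret < 0)
		goto out;

	/* Walk the collection in insertion order. */
	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter)))
		pr_debug("val %llu aux %llu\n",
			 (unsigned long long)node->val,
			 (unsigned long long)node->aux);
	ret = 0;
out:
	ulist_free(ulist);
	return ret;
}

The list_add_tail() in ulist_add_merge() is what preserves that ordering, and the seqnum bookkeeping added under CONFIG_BTRFS_DEBUG lets ulist_next() assert it.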