@@ -149,33 +149,33 @@ struct bkey_float {
 #define BSET_CACHELINE		128

 /* Space required for the btree node keys */
-static inline size_t btree_keys_bytes(struct btree *b)
+static inline size_t btree_keys_bytes(struct btree_keys *b)
 {
         return PAGE_SIZE << b->page_order;
 }

-static inline size_t btree_keys_cachelines(struct btree *b)
+static inline size_t btree_keys_cachelines(struct btree_keys *b)
 {
         return btree_keys_bytes(b) / BSET_CACHELINE;
 }

 /* Space required for the auxiliary search trees */
-static inline size_t bset_tree_bytes(struct btree *b)
+static inline size_t bset_tree_bytes(struct btree_keys *b)
 {
         return btree_keys_cachelines(b) * sizeof(struct bkey_float);
 }

 /* Space required for the prev pointers */
-static inline size_t bset_prev_bytes(struct btree *b)
+static inline size_t bset_prev_bytes(struct btree_keys *b)
 {
         return btree_keys_cachelines(b) * sizeof(uint8_t);
 }

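Note: taken together, these helpers carve three per-node arrays out of a single
page_order. A worked sketch with assumed numbers (PAGE_SIZE = 4096 and
page_order = 2 are assumptions; BSET_CACHELINE = 128 is from the #define above;
struct bkey_float packing to 4 bytes is also an assumption, not stated in this
patch):

        btree_keys_bytes(b)      = 4096 << 2   = 16384 bytes of key space
        btree_keys_cachelines(b) = 16384 / 128 =   128 lookup regions
        bset_tree_bytes(b)       =   128 * 4   =   512 bytes of bkey_float
        bset_prev_bytes(b)       =   128 * 1   =   128 bytes of prev offsets
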
 /* Memory allocation */

-void bch_btree_keys_free(struct btree *b)
+void bch_btree_keys_free(struct btree_keys *b)
 {
-        struct bset_tree *t = b->sets;
+        struct bset_tree *t = b->set;

         if (bset_prev_bytes(b) < PAGE_SIZE)
                 kfree(t->prev);
@@ -195,10 +195,11 @@ void bch_btree_keys_free(struct btree *b)
         t->tree = NULL;
         t->data = NULL;
 }
+EXPORT_SYMBOL(bch_btree_keys_free);

-int bch_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
+int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
 {
-        struct bset_tree *t = b->sets;
+        struct bset_tree *t = b->set;

         BUG_ON(t->data);
@@ -225,6 +226,29 @@ err:
         bch_btree_keys_free(b);
         return -ENOMEM;
 }
+EXPORT_SYMBOL(bch_btree_keys_alloc);
+
+void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
+                         bool *expensive_debug_checks)
+{
+        unsigned i;
+
+        b->ops = ops;
+        b->expensive_debug_checks = expensive_debug_checks;
+        b->nsets = 0;
+        b->last_set_unwritten = 0;
+
+        /* XXX: shouldn't be needed */
+        for (i = 0; i < MAX_BSETS; i++)
+                b->set[i].size = 0;
+        /*
+         * Second loop starts at 1 because b->set[0].data is the memory we
+         * allocated
+         */
+        for (i = 1; i < MAX_BSETS; i++)
+                b->set[i].data = NULL;
+}
+EXPORT_SYMBOL(bch_btree_keys_init);
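Note: a minimal sketch of the intended init/alloc pairing from the caller's
side. The ops table and argument expressions are illustrative only (an ops
table by this name is not introduced in this hunk); on failure,
bch_btree_keys_alloc() has already called bch_btree_keys_free() on the
partial allocation, per the err path above:

        bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
                            &c->expensive_debug_checks);
        if (bch_btree_keys_alloc(&b->keys, ilog2(bucket_pages(c)), GFP_KERNEL))
                return -ENOMEM;     /* keys arrays already freed */
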
 /* Binary tree stuff for auxiliary search trees */
@@ -448,9 +472,9 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
                 f->exponent = 127;
 }

-static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
+static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
 {
-        if (t != b->sets) {
+        if (t != b->set) {
                 unsigned j = roundup(t[-1].size,
                                      64 / sizeof(struct bkey_float));
@@ -458,27 +482,30 @@ static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
                 t->prev = t[-1].prev + j;
         }

-        while (t < b->sets + MAX_BSETS)
+        while (t < b->set + MAX_BSETS)
                 t++->size = 0;
 }

-static void bch_bset_build_unwritten_tree(struct btree *b)
+static void bch_bset_build_unwritten_tree(struct btree_keys *b)
 {
         struct bset_tree *t = bset_tree_last(b);

+        BUG_ON(b->last_set_unwritten);
+        b->last_set_unwritten = 1;
+
         bset_alloc_tree(b, t);

-        if (t->tree != b->sets->tree + btree_keys_cachelines(b)) {
+        if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
                 t->prev[0] = bkey_to_cacheline_offset(t->data->start);
                 t->size = 1;
         }
 }

-void bch_bset_init_next(struct btree *b, struct bset *i, uint64_t magic)
+void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
 {
-        if (i != b->sets->data) {
-                b->sets[++b->nsets].data = i;
-                i->seq = b->sets->data->seq;
+        if (i != b->set->data) {
+                b->set[++b->nsets].data = i;
+                i->seq = b->set->data->seq;
         } else
                 get_random_bytes(&i->seq, sizeof(uint64_t));
@@ -488,18 +515,21 @@ void bch_bset_init_next(struct btree *b, struct bset *i, uint64_t magic)
         bch_bset_build_unwritten_tree(b);
 }
+EXPORT_SYMBOL(bch_bset_init_next);

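Note: every bset after the first inherits the seq of the node's first bset;
only a brand-new node draws a random seq, which is what lets the read path
tell a node's own bsets apart from leftover data. The calling pattern this
enables looks roughly like this (caller-side sketch; the argument expressions
are borrowed from elsewhere in bcache, not from this hunk):

        /* open a fresh bset at the node's current write pointer */
        bch_bset_init_next(&b->keys, write_block(b), bset_magic(&b->c->sb));

        /* fill it; legal only while the tail bset is still unwritten */
        bch_bset_insert(&b->keys, where, insert);
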
-static void bset_build_written_tree(struct btree *b)
+void bch_bset_build_written_tree(struct btree_keys *b)
 {
         struct bset_tree *t = bset_tree_last(b);
         struct bkey *k = t->data->start;
         unsigned j, cacheline = 1;

+        b->last_set_unwritten = 0;
+
         bset_alloc_tree(b, t);

         t->size = min_t(unsigned,
                         bkey_to_cacheline(t, bset_bkey_last(t->data)),
-                        b->sets->tree + btree_keys_cachelines(b) - t->tree);
+                        b->set->tree + btree_keys_cachelines(b) - t->tree);

         if (t->size < 2) {
                 t->size = 0;
@@ -532,13 +562,14 @@ static void bset_build_written_tree(struct btree *b)
              j = inorder_next(j, t->size))
                 make_bfloat(t, j);
 }
+EXPORT_SYMBOL(bch_bset_build_written_tree);

-void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
+void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
 {
         struct bset_tree *t;
         unsigned inorder, j = 1;

-        for (t = b->sets; t <= bset_tree_last(b); t++)
+        for (t = b->set; t <= bset_tree_last(b); t++)
                 if (k < bset_bkey_last(t->data))
                         goto found_set;
@@ -577,8 +608,9 @@ fix_right: do {
                         j = j * 2 + 1;
                 } while (j < t->size);
 }
+EXPORT_SYMBOL(bch_bset_fix_invalidated_key);

-static void bch_bset_fix_lookup_table(struct btree *b,
+static void bch_bset_fix_lookup_table(struct btree_keys *b,
                                       struct bset_tree *t,
                                       struct bkey *k)
 {
@@ -613,7 +645,7 @@ static void bch_bset_fix_lookup_table(struct btree *b,
                 }
         }

-        if (t->size == b->sets->tree + btree_keys_cachelines(b) - t->tree)
+        if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
                 return;

         /* Possibly add a new entry to the end of the lookup table */
@@ -627,12 +659,12 @@ static void bch_bset_fix_lookup_table(struct btree *b,
         }
 }

-void bch_bset_insert(struct btree *b, struct bkey *where,
+void bch_bset_insert(struct btree_keys *b, struct bkey *where,
                      struct bkey *insert)
 {
         struct bset_tree *t = bset_tree_last(b);

-        BUG_ON(t->data != write_block(b));
+        BUG_ON(!b->last_set_unwritten);
         BUG_ON(bset_byte_offset(b, t->data) +
                __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
                PAGE_SIZE << b->page_order);
@@ -645,20 +677,17 @@ void bch_bset_insert(struct btree *b, struct bkey *where,
         bkey_copy(where, insert);
         bch_bset_fix_lookup_table(b, t, where);
 }
+EXPORT_SYMBOL(bch_bset_insert);

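Note: replacing BUG_ON(t->data != write_block(b)) with
BUG_ON(!b->last_set_unwritten) is the point of the new flag: struct btree_keys
has no view of the btree node's on-disk write pointer, so "the tail bset is
still writable" must now be tracked explicitly. As wired up in this patch:

        /*
         * bch_bset_init_next()
         *   -> bch_bset_build_unwritten_tree():  b->last_set_unwritten = 1
         * bch_bset_build_written_tree():         b->last_set_unwritten = 0
         *
         * bch_bset_insert() is only legal in between.
         */
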
 struct bset_search_iter {
         struct bkey *l, *r;
 };

-static struct bset_search_iter bset_search_write_set(struct btree *b,
-                                                     struct bset_tree *t,
+static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
                                                      const struct bkey *search)
 {
         unsigned li = 0, ri = t->size;

-        BUG_ON(!b->nsets &&
-               t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
-
         while (li + 1 != ri) {
                 unsigned m = (li + ri) >> 1;
@@ -674,8 +703,7 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,
         };
 }

-static struct bset_search_iter bset_search_tree(struct btree *b,
-                                                struct bset_tree *t,
+static struct bset_search_iter bset_search_tree(struct bset_tree *t,
                                                 const struct bkey *search)
 {
         struct bkey *l, *r;
@@ -759,7 +787,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
         if (unlikely(!t->size)) {
                 i.l = t->data->start;
                 i.r = bset_bkey_last(t->data);
-        } else if (bset_written(b, t)) {
+        } else if (bset_written(&b->keys, t)) {
                 /*
                  * Each node in the auxiliary search tree covers a certain range
                  * of bits, and keys above and below the set it covers might
@@ -773,12 +801,16 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
                 if (unlikely(bkey_cmp(search, t->data->start) < 0))
                         return t->data->start;

-                i = bset_search_tree(b, t, search);
-        } else
-                i = bset_search_write_set(b, t, search);
+                i = bset_search_tree(t, search);
+        } else {
+                BUG_ON(!b->keys.nsets &&
+                       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
+
+                i = bset_search_write_set(t, search);
+        }

         if (expensive_debug_checks(b->c)) {
-                BUG_ON(bset_written(b, t) &&
+                BUG_ON(bset_written(&b->keys, t) &&
                        i.l != t->data->start &&
                        bkey_cmp(tree_to_prev_bkey(t,
                           inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
@@ -794,6 +826,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
         return i.l;
 }
+EXPORT_SYMBOL(__bch_bset_search);

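Note: bset_search_write_set() is a plain binary search over the per-cacheline
lookup table; the loop maintains the invariant "entry li <= search < entry ri",
which is why dropping the struct btree argument costs nothing. A self-contained
illustration of the same idiom on a sorted array (my code, not the kernel's):

        #include <stddef.h>
        #include <stdint.h>

        /* Returns the greatest index i with a[i] <= x; assumes a[0] <= x. */
        static size_t search_floor(const uint64_t *a, size_t n, uint64_t x)
        {
                size_t li = 0, ri = n;

                while (li + 1 != ri) {
                        size_t m = (li + ri) >> 1;  /* li < m < ri */

                        if (a[m] > x)   /* mirrors bkey_cmp(...) > 0 */
                                ri = m;
                        else
                                li = m;
                }

                return li;
        }
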
 /* Btree iterator */
@@ -833,7 +866,7 @@ static struct bkey *__bch_btree_iter_init(struct btree *b,
         iter->b = b;
 #endif

-        for (; start <= &b->sets[b->nsets]; start++) {
+        for (; start <= bset_tree_last(&b->keys); start++) {
                 ret = bch_bset_search(b, start, search);
                 bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
         }
@@ -845,8 +878,9 @@ struct bkey *bch_btree_iter_init(struct btree *b,
                                  struct btree_iter *iter,
                                  struct bkey *search)
 {
-        return __bch_btree_iter_init(b, iter, search, b->sets);
+        return __bch_btree_iter_init(b, iter, search, b->keys.set);
 }
+EXPORT_SYMBOL(bch_btree_iter_init);

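Note: the iterator merge-walks every bset in the node at once. A typical
consumer looks roughly like this (a sketch modeled on bcache's for_each_key
pattern, not code from this hunk):

        struct btree_iter iter;
        struct bkey *k;

        bch_btree_iter_init(b, &iter, NULL);
        while ((k = bch_btree_iter_next(&iter))) {
                /* keys arrive in sorted order, merged across all bsets */
        }
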
 static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
                                                  btree_iter_cmp_fn *cmp)
@@ -879,9 +913,10 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
         return __bch_btree_iter_next(iter, btree_iter_cmp);

 }
+EXPORT_SYMBOL(bch_btree_iter_next);

 struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
-                                        struct btree *b, ptr_filter_fn fn)
+                                        struct btree_keys *b, ptr_filter_fn fn)
 {
         struct bkey *ret;
@@ -913,15 +948,16 @@ int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
         return 0;
 }
+EXPORT_SYMBOL(bch_bset_sort_state_init);

-static void btree_mergesort(struct btree *b, struct bset *out,
+static void btree_mergesort(struct btree_keys *b, struct bset *out,
                             struct btree_iter *iter,
                             bool fixup, bool remove_stale)
 {
         int i;
         struct bkey *k, *last = NULL;
         BKEY_PADDED(k) tmp;
-        bool (*bad)(struct btree *, const struct bkey *) = remove_stale
+        bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
                 ? bch_ptr_bad
                 : bch_ptr_invalid;
@@ -955,7 +991,7 @@ static void btree_mergesort(struct btree *b, struct bset *out,
         pr_debug("sorted %i keys", out->keys);
 }

-static void __btree_sort(struct btree *b, struct btree_iter *iter,
+static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
                          unsigned start, unsigned order, bool fixup,
                          struct bset_sort_state *state)
 {
@@ -968,7 +1004,7 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
                 out = page_address(mempool_alloc(state->pool, GFP_NOIO));
                 used_mempool = true;
-                order = ilog2(bucket_pages(b->c));
+                order = state->page_order;
         }

         start_time = local_clock();
@@ -983,13 +1019,13 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
                  * memcpy()
                  */

-                out->magic = bset_magic(&b->c->sb);
-                out->seq = b->sets[0].data->seq;
-                out->version = b->sets[0].data->version;
-                swap(out, b->sets[0].data);
+                out->magic = b->set->data->magic;
+                out->seq = b->set->data->seq;
+                out->version = b->set->data->version;
+                swap(out, b->set->data);
         } else {
-                b->sets[start].data->keys = out->keys;
-                memcpy(b->sets[start].data->start, out->start,
+                b->set[start].data->keys = out->keys;
+                memcpy(b->set[start].data->start, out->start,
                        (void *) bset_bkey_last(out) - (void *) out->start);
         }

@@ -998,7 +1034,7 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
         else
                 free_pages((unsigned long) out, order);

-        bset_build_written_tree(b);
+        bch_bset_build_written_tree(b);

         if (!start)
                 bch_time_stats_update(&state->time, start_time);
@@ -1007,34 +1043,32 @@
 void bch_btree_sort_partial(struct btree *b, unsigned start,
                             struct bset_sort_state *state)
 {
-        size_t order = b->page_order, keys = 0;
+        size_t order = b->keys.page_order, keys = 0;
         struct btree_iter iter;
         int oldsize = bch_count_data(b);

-        __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
-
-        BUG_ON(!bset_written(b, bset_tree_last(b)) &&
-               (bset_tree_last(b)->size || b->nsets));
+        __bch_btree_iter_init(b, &iter, NULL, &b->keys.set[start]);

         if (start) {
                 unsigned i;

-                for (i = start; i <= b->nsets; i++)
-                        keys += b->sets[i].data->keys;
+                for (i = start; i <= b->keys.nsets; i++)
+                        keys += b->keys.set[i].data->keys;

-                order = roundup_pow_of_two(__set_bytes(b->sets->data,
+                order = roundup_pow_of_two(__set_bytes(b->keys.set->data,
                                                        keys)) / PAGE_SIZE;
                 if (order)
                         order = ilog2(order);
         }

-        __btree_sort(b, &iter, start, order, false, state);
+        __btree_sort(&b->keys, &iter, start, order, false, state);

         EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
 }
 EXPORT_SYMBOL(bch_btree_sort_partial);

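Note: the order computation picks the smallest page allocation that holds just
the sets being resorted, rather than a whole node. A worked example with
made-up numbers (4096-byte pages assumed; sizeof(struct bset) is the small
on-disk header, a few dozen bytes):

        /*
         * Say sets start..nsets hold 300 u64s of keys in total:
         *
         *   __set_bytes(data, 300)  = sizeof(struct bset) + 300 * 8
         *                           = a little over 2400 bytes
         *   roundup_pow_of_two(...) = 4096;  4096 / PAGE_SIZE = 1
         *   order = ilog2(1)        = 0, i.e. a single page suffices
         */
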
-void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter,
+void bch_btree_sort_and_fix_extents(struct btree_keys *b,
+                                    struct btree_iter *iter,
                                     struct bset_sort_state *state)
 {
         __btree_sort(b, iter, 0, b->page_order, true, state);
@@ -1048,11 +1082,11 @@ void bch_btree_sort_into(struct btree *b, struct btree *new,
         struct btree_iter iter;
         bch_btree_iter_init(b, &iter, NULL);

-        btree_mergesort(b, new->sets->data, &iter, false, true);
+        btree_mergesort(&b->keys, new->keys.set->data, &iter, false, true);

         bch_time_stats_update(&state->time, start_time);

-        new->sets->size = 0;
+        new->keys.set->size = 0; // XXX: why?
 }

 #define SORT_CRIT        (4096 / sizeof(uint64_t))
@@ -1062,28 +1096,31 @@ void bch_btree_sort_lazy(struct btree *b, struct bset_sort_state *state)
         unsigned crit = SORT_CRIT;
         int i;

+        b->keys.last_set_unwritten = 0;
+
         /* Don't sort if nothing to do */
-        if (!b->nsets)
+        if (!b->keys.nsets)
                 goto out;

-        for (i = b->nsets - 1; i >= 0; --i) {
+        for (i = b->keys.nsets - 1; i >= 0; --i) {
                 crit *= state->crit_factor;

-                if (b->sets[i].data->keys < crit) {
+                if (b->keys.set[i].data->keys < crit) {
                         bch_btree_sort_partial(b, i, state);
                         return;
                 }
         }

         /* Sort if we'd overflow */
-        if (b->nsets + 1 == MAX_BSETS) {
+        if (b->keys.nsets + 1 == MAX_BSETS) {
                 bch_btree_sort(b, state);
                 return;
         }

 out:
-        bset_build_written_tree(b);
+        bch_bset_build_written_tree(&b->keys);
 }
+EXPORT_SYMBOL(bch_btree_sort_lazy);

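Note: the lazy-sort heuristic scans from the newest set backwards, letting
crit grow by crit_factor at each step, so a resort that reaches further back
is allowed to move proportionally more data. An illustrative run (crit_factor
and the per-set key counts are invented; SORT_CRIT is 4096 / 8 = 512 u64s):

        /*
         * nsets = 3, crit_factor = 3:
         *
         *   i = 2: crit = 1536;  set[2] holds 2000 u64s -> too big, continue
         *   i = 1: crit = 4608;  set[1] holds 3000 u64s -> 3000 < 4608, so
         *          bch_btree_sort_partial(b, 1, state) resorts sets 1..3
         *
         * If no set qualifies and the node already has MAX_BSETS - 1 sets,
         * the whole node is sorted instead.
         */
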
 /* Sysfs stuff */
@@ -1102,12 +1139,12 @@ static int btree_bset_stats(struct btree_op *op, struct btree *b)
         stats->nodes++;

-        for (i = 0; i <= b->nsets; i++) {
-                struct bset_tree *t = &b->sets[i];
+        for (i = 0; i <= b->keys.nsets; i++) {
+                struct bset_tree *t = &b->keys.set[i];
                 size_t bytes = t->data->keys * sizeof(uint64_t);
                 size_t j;

-                if (bset_written(b, t)) {
+                if (bset_written(&b->keys, t)) {
                         stats->sets_written++;
                         stats->bytes_written += bytes;