@@ -29,12 +29,6 @@
 #include "locking.h"
 #include "ref-cache.h"
 
-#define BLOCK_GROUP_DATA EXTENT_WRITEBACK
-#define BLOCK_GROUP_METADATA EXTENT_UPTODATE
-#define BLOCK_GROUP_SYSTEM EXTENT_NEW
-
-#define BLOCK_GROUP_DIRTY EXTENT_DIRTY
-
 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
                                 btrfs_root *extent_root);
 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
@@ -62,6 +56,127 @@ void maybe_unlock_mutex(struct btrfs_root *root)
        }
 }
 
+static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
+{
+       return (cache->flags & bits) == bits;
+}
+
+/*
+ * this adds the block group to the fs_info rb tree for the block group
+ * cache
+ */
+int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
+                               struct btrfs_block_group_cache *block_group)
+{
+       struct rb_node **p;
+       struct rb_node *parent = NULL;
+       struct btrfs_block_group_cache *cache;
+
+       spin_lock(&info->block_group_cache_lock);
+       p = &info->block_group_cache_tree.rb_node;
+
+       while (*p) {
+               parent = *p;
+               cache = rb_entry(parent, struct btrfs_block_group_cache,
+                                cache_node);
+               if (block_group->key.objectid < cache->key.objectid) {
+                       p = &(*p)->rb_left;
+               } else if (block_group->key.objectid > cache->key.objectid) {
+                       p = &(*p)->rb_right;
+               } else {
+                       spin_unlock(&info->block_group_cache_lock);
+                       return -EEXIST;
+               }
+       }
+
+       rb_link_node(&block_group->cache_node, parent, p);
+       rb_insert_color(&block_group->cache_node,
+                       &info->block_group_cache_tree);
+       spin_unlock(&info->block_group_cache_lock);
+
+       return 0;
+}
+
+/*
+ * This will return the block group at or after bytenr if contains is 0, else
+ * it will return the block group that contains the bytenr
+ */
+static struct btrfs_block_group_cache *
+block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
+                             int contains)
+{
+       struct btrfs_block_group_cache *cache, *ret = NULL;
+       struct rb_node *n;
+       u64 end, start;
+
+       spin_lock(&info->block_group_cache_lock);
+       n = info->block_group_cache_tree.rb_node;
+
+       while (n) {
+               cache = rb_entry(n, struct btrfs_block_group_cache,
+                                cache_node);
+               end = cache->key.objectid + cache->key.offset - 1;
+               start = cache->key.objectid;
+
+               if (bytenr < start) {
+                       if (!contains && (!ret || start < ret->key.objectid))
+                               ret = cache;
+                       n = n->rb_left;
+               } else if (bytenr > start) {
+                       if (contains && bytenr <= end) {
+                               ret = cache;
+                               break;
+                       }
+                       n = n->rb_right;
+               } else {
+                       ret = cache;
+                       break;
+               }
+       }
+       spin_unlock(&info->block_group_cache_lock);
+
+       return ret;
+}
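
The two helpers above are the stock <linux/rbtree.h> pattern: the caller supplies the ordering (here, key.objectid), walks down to the insertion point or target, links new nodes with rb_link_node(), and lets rb_insert_color() rebalance. A minimal standalone sketch of the same idiom, keyed by a bare u64 (illustrative only, not part of this patch):

/* Illustrative only. A minimal rb-tree keyed by a u64 start offset, using
 * the same <linux/rbtree.h> calls as the helpers above: walk to a leaf,
 * rb_link_node() the new node, rb_insert_color() to rebalance.
 */
#include <linux/types.h>
#include <linux/rbtree.h>

struct range_node {
        struct rb_node node;
        u64 start;
};

static int range_insert(struct rb_root *root, struct range_node *new)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct range_node *cur;

        while (*p) {
                parent = *p;
                cur = rb_entry(parent, struct range_node, node);
                if (new->start < cur->start)
                        p = &(*p)->rb_left;
                else if (new->start > cur->start)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST; /* duplicate key */
        }
        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);
        return 0;
}
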
+
+/*
+ * this is only called by cache_block_group, since we could have freed extents
+ * we need to check the pinned_extents for any extents that can't be used yet
+ * since their free space will be released as soon as the transaction commits.
+ */
+static int add_new_free_space(struct btrfs_block_group_cache *block_group,
+                             struct btrfs_fs_info *info, u64 start, u64 end)
+{
+       u64 extent_start, extent_end, size;
+       int ret;
+
+       while (start < end) {
+               ret = find_first_extent_bit(&info->pinned_extents, start,
+                                           &extent_start, &extent_end,
+                                           EXTENT_DIRTY);
+               if (ret)
+                       break;
+
+               if (extent_start == start) {
+                       start = extent_end + 1;
+               } else if (extent_start > start && extent_start < end) {
+                       size = extent_start - start;
+                       ret = btrfs_add_free_space(block_group, start, size);
+                       BUG_ON(ret);
+                       start = extent_end + 1;
+               } else {
+                       break;
+               }
+       }
+
+       if (start < end) {
+               size = end - start;
+               ret = btrfs_add_free_space(block_group, start, size);
+               BUG_ON(ret);
+       }
+
+       return 0;
+}
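
add_new_free_space() above publishes everything in [start, end) that is not currently pinned: it walks the EXTENT_DIRTY ranges of pinned_extents and hands only the gaps between them to btrfs_add_free_space(). A rough standalone model of that carve-out, with a sorted array standing in for the pinned-extents tree and a callback standing in for btrfs_add_free_space() (illustrative only, not from the patch):

/* Illustrative only. pinned[] is a sorted, non-overlapping array of
 * inclusive {start, end} ranges standing in for the pinned_extents tree;
 * add_free() stands in for btrfs_add_free_space(). Every byte of
 * [start, end) not covered by a pinned range is reported as free,
 * mirroring the loop above.
 */
typedef unsigned long long u64;

struct range { u64 start, end; };

static void carve_free_space(u64 start, u64 end, const struct range *pinned,
                             int npinned, void (*add_free)(u64 start, u64 size))
{
        int i;

        for (i = 0; i < npinned && start < end; i++) {
                if (pinned[i].end < start)
                        continue;       /* pinned range entirely behind us */
                if (pinned[i].start >= end)
                        break;          /* pinned range past our window */
                if (pinned[i].start > start)
                        add_free(start, pinned[i].start - start);
                start = pinned[i].end + 1;      /* skip the pinned bytes */
        }
        if (start < end)
                add_free(start, end - start);
}
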
+
 static int cache_block_group(struct btrfs_root *root,
                             struct btrfs_block_group_cache *block_group)
 {
@@ -69,10 +184,8 @@ static int cache_block_group(struct btrfs_root *root,
        int ret = 0;
        struct btrfs_key key;
        struct extent_buffer *leaf;
-       struct extent_io_tree *free_space_cache;
        int slot;
        u64 last = 0;
-       u64 hole_size;
        u64 first_free;
        int found = 0;
 
@@ -80,7 +193,6 @@ static int cache_block_group(struct btrfs_root *root,
                return 0;
 
        root = root->fs_info->extent_root;
-       free_space_cache = &root->fs_info->free_space_cache;
 
        if (block_group->cached)
                return 0;
@@ -96,7 +208,8 @@ static int cache_block_group(struct btrfs_root *root,
         * skip the locking here
         */
        path->skip_locking = 1;
-       first_free = block_group->key.objectid;
+       first_free = max_t(u64, block_group->key.objectid,
+                          BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
        key.objectid = block_group->key.objectid;
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
@@ -119,32 +232,28 @@ static int cache_block_group(struct btrfs_root *root,
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto err;
-                       if (ret == 0) {
+                       if (ret == 0)
                                continue;
-                       } else {
+                       else
                                break;
-                       }
                }
                btrfs_item_key_to_cpu(leaf, &key, slot);
-               if (key.objectid < block_group->key.objectid) {
+               if (key.objectid < block_group->key.objectid)
                        goto next;
-               }
+
                if (key.objectid >= block_group->key.objectid +
-                   block_group->key.offset) {
+                   block_group->key.offset)
                        break;
-               }
 
                if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
                        if (!found) {
                                last = first_free;
                                found = 1;
                        }
-                       if (key.objectid > last) {
-                               hole_size = key.objectid - last;
-                               set_extent_dirty(free_space_cache, last,
-                                                last + hole_size - 1,
-                                                GFP_NOFS);
-                       }
+
+                       add_new_free_space(block_group, root->fs_info, last,
+                                          key.objectid);
+
                        last = key.objectid + key.offset;
                }
 next:
@@ -153,13 +262,11 @@ next:
 
        if (!found)
                last = first_free;
-       if (block_group->key.objectid +
-           block_group->key.offset > last) {
-               hole_size = block_group->key.objectid +
-                           block_group->key.offset - last;
-               set_extent_dirty(free_space_cache, last,
-                                last + hole_size - 1, GFP_NOFS);
-       }
+
+       add_new_free_space(block_group, root->fs_info, last,
+                          block_group->key.objectid +
+                          block_group->key.offset);
+
        block_group->cached = 1;
        ret = 0;
 err:
@@ -167,166 +274,79 @@ err:
        return ret;
 }
 
+/*
+ * return the block group that starts at or after bytenr
+ */
 struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
                                                       btrfs_fs_info *info,
                                                         u64 bytenr)
 {
-       struct extent_io_tree *block_group_cache;
-       struct btrfs_block_group_cache *block_group = NULL;
-       u64 ptr;
-       u64 start;
-       u64 end;
-       int ret;
+       struct btrfs_block_group_cache *cache;
 
-       bytenr = max_t(u64, bytenr,
-                      BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
-       block_group_cache = &info->block_group_cache;
-       ret = find_first_extent_bit(block_group_cache,
-                                   bytenr, &start, &end,
-                                   BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
-                                   BLOCK_GROUP_SYSTEM);
-       if (ret) {
-               return NULL;
-       }
-       ret = get_state_private(block_group_cache, start, &ptr);
-       if (ret)
-               return NULL;
+       cache = block_group_cache_tree_search(info, bytenr, 0);
 
-       block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
-       return block_group;
+       return cache;
 }
 
+/*
+ * return the block group that contains the given bytenr
+ */
 struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
                                                         btrfs_fs_info *info,
                                                         u64 bytenr)
 {
-       struct extent_io_tree *block_group_cache;
-       struct btrfs_block_group_cache *block_group = NULL;
-       u64 ptr;
-       u64 start;
-       u64 end;
-       int ret;
+       struct btrfs_block_group_cache *cache;
 
-       bytenr = max_t(u64, bytenr,
-                      BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
-       block_group_cache = &info->block_group_cache;
-       ret = find_first_extent_bit(block_group_cache,
-                                   bytenr, &start, &end,
-                                   BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
-                                   BLOCK_GROUP_SYSTEM);
-       if (ret) {
-               return NULL;
-       }
-       ret = get_state_private(block_group_cache, start, &ptr);
-       if (ret)
-               return NULL;
+       cache = block_group_cache_tree_search(info, bytenr, 1);
 
-       block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
-       if (block_group->key.objectid <= bytenr && bytenr <
-           block_group->key.objectid + block_group->key.offset)
-               return block_group;
-       return NULL;
+       return cache;
 }
 
-static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
-{
-       return (cache->flags & bits) == bits;
-}
-
-static int noinline find_search_start(struct btrfs_root *root,
-                             struct btrfs_block_group_cache **cache_ret,
-                             u64 *start_ret, u64 num, int data)
+static int noinline find_free_space(struct btrfs_root *root,
+                                   struct btrfs_block_group_cache **cache_ret,
+                                   u64 *start_ret, u64 num, int data)
 {
        int ret;
        struct btrfs_block_group_cache *cache = *cache_ret;
-       struct extent_io_tree *free_space_cache;
-       struct extent_state *state;
+       struct btrfs_free_space *info = NULL;
        u64 last;
-       u64 start = 0;
-       u64 cache_miss = 0;
        u64 total_fs_bytes;
        u64 search_start = *start_ret;
-       int wrapped = 0;
 
        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
-       free_space_cache = &root->fs_info->free_space_cache;
 
        if (!cache)
                goto out;
 
+       last = max(search_start, cache->key.objectid);
+
 again:
        ret = cache_block_group(root, cache);
-       if (ret) {
+       if (ret)
                goto out;
-       }
 
-       last = max(search_start, cache->key.objectid);
-       if (!block_group_bits(cache, data) || cache->ro)
+       if (cache->ro || !block_group_bits(cache, data))
                goto new_group;
 
-       spin_lock_irq(&free_space_cache->lock);
-       state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY);
-       while(1) {
-               if (!state) {
-                       if (!cache_miss)
-                               cache_miss = last;
-                       spin_unlock_irq(&free_space_cache->lock);
-                       goto new_group;
-               }
-
-               start = max(last, state->start);
-               last = state->end + 1;
-               if (last - start < num) {
-                       do {
-                               state = extent_state_next(state);
-                       } while(state && !(state->state & EXTENT_DIRTY));
-                       continue;
-               }
-               spin_unlock_irq(&free_space_cache->lock);
-               if (cache->ro) {
-                       goto new_group;
-               }
-               if (start + num > cache->key.objectid + cache->key.offset)
-                       goto new_group;
-               if (!block_group_bits(cache, data)) {
-                       printk("block group bits don't match %Lu %d\n", cache->flags, data);
-               }
-               *start_ret = start;
+       info = btrfs_find_free_space(cache, last, num);
+       if (info) {
+               *start_ret = info->offset;
                return 0;
        }
-out:
-       cache = btrfs_lookup_block_group(root->fs_info, search_start);
-       if (!cache) {
-               printk("Unable to find block group for %Lu\n", search_start);
-               WARN_ON(1);
-       }
-       return -ENOSPC;
 
 new_group:
        last = cache->key.objectid + cache->key.offset;
-wrapped:
+
        cache = btrfs_lookup_first_block_group(root->fs_info, last);
-       if (!cache || cache->key.objectid >= total_fs_bytes) {
-no_cache:
-               if (!wrapped) {
-                       wrapped = 1;
-                       last = search_start;
-                       goto wrapped;
-               }
+       if (!cache || cache->key.objectid >= total_fs_bytes)
                goto out;
-       }
-       if (cache_miss && !cache->cached) {
-               cache_block_group(root, cache);
-               last = cache_miss;
-               cache = btrfs_lookup_first_block_group(root->fs_info, last);
-       }
-       cache_miss = 0;
-       cache = btrfs_find_block_group(root, cache, last, data, 0);
-       if (!cache)
-               goto no_cache;
+
        *cache_ret = cache;
        goto again;
+
+out:
+       return -ENOSPC;
 }
 
 static u64 div_factor(u64 num, int factor)
@@ -338,16 +358,19 @@ static u64 div_factor(u64 num, int factor)
        return num;
 }
 
-static int block_group_state_bits(u64 flags)
+static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
+                                                 u64 flags)
 {
-       int bits = 0;
-       if (flags & BTRFS_BLOCK_GROUP_DATA)
-               bits |= BLOCK_GROUP_DATA;
-       if (flags & BTRFS_BLOCK_GROUP_METADATA)
-               bits |= BLOCK_GROUP_METADATA;
-       if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
-               bits |= BLOCK_GROUP_SYSTEM;
-       return bits;
+       struct list_head *head = &info->space_info;
+       struct list_head *cur;
+       struct btrfs_space_info *found;
+       list_for_each(cur, head) {
+               found = list_entry(cur, struct btrfs_space_info, list);
+               if (found->flags == flags)
+                       return found;
+       }
+       return NULL;
+
 }
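
__find_space_info() is a straight linear scan matched on the exact flags value, which is fine because a filesystem only ever has a handful of space infos (data, metadata, system, plus profile variants). The same walk could be written with the list_for_each_entry() helper, which folds the list_entry() call into the loop header; a sketch of that equivalent form (illustrative only, behavior identical to the pair above):

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct btrfs_space_info *found;

        /* same scan as above, with the entry conversion done by the macro */
        list_for_each_entry(found, &info->space_info, list)
                if (found->flags == flags)
                        return found;
        return NULL;
}
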
 
 static struct btrfs_block_group_cache *
@@ -356,28 +379,19 @@ __btrfs_find_block_group(struct btrfs_root *root,
                         u64 search_start, int data, int owner)
 {
        struct btrfs_block_group_cache *cache;
-       struct extent_io_tree *block_group_cache;
        struct btrfs_block_group_cache *found_group = NULL;
        struct btrfs_fs_info *info = root->fs_info;
+       struct btrfs_space_info *sinfo;
        u64 used;
        u64 last = 0;
-       u64 start;
-       u64 end;
        u64 free_check;
-       u64 ptr;
-       int bit;
-       int ret;
        int full_search = 0;
        int factor = 10;
        int wrapped = 0;
 
-       block_group_cache = &info->block_group_cache;
-
        if (data & BTRFS_BLOCK_GROUP_METADATA)
                factor = 9;
 
-       bit = block_group_state_bits(data);
-
        if (search_start) {
                struct btrfs_block_group_cache *shint;
                shint = btrfs_lookup_first_block_group(info, search_start);
@@ -408,20 +422,30 @@ __btrfs_find_block_group(struct btrfs_root *root,
                else
                        last = search_start;
        }
+       sinfo = __find_space_info(root->fs_info, data);
+       if (!sinfo)
+               goto found;
 again:
        while(1) {
-               ret = find_first_extent_bit(block_group_cache, last,
-                                           &start, &end, bit);
-               if (ret)
-                       break;
+               struct list_head *l;
 
-               ret = get_state_private(block_group_cache, start, &ptr);
-               if (ret) {
-                       last = end + 1;
-                       continue;
+               cache = NULL;
+
+               spin_lock(&sinfo->lock);
+               list_for_each(l, &sinfo->block_groups) {
+                       struct btrfs_block_group_cache *entry;
+                       entry = list_entry(l, struct btrfs_block_group_cache,
+                                          list);
+                       if ((entry->key.objectid >= last) &&
+                           (!cache || (entry->key.objectid <
+                                       cache->key.objectid)))
+                               cache = entry;
                }
+               spin_unlock(&sinfo->lock);
+
+               if (!cache)
+                       break;
 
-               cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);
@@ -462,6 +486,7 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
        ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
        return ret;
 }
+
 static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
                           u64 owner, u64 owner_offset)
 {
@@ -1175,34 +1200,37 @@ fail:
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
 {
-       struct extent_io_tree *block_group_cache;
-       struct btrfs_block_group_cache *cache;
-       int ret;
+       struct btrfs_block_group_cache *cache, *entry;
+       struct rb_node *n;
        int err = 0;
        int werr = 0;
        struct btrfs_path *path;
        u64 last = 0;
-       u64 start;
-       u64 end;
-       u64 ptr;
 
-       block_group_cache = &root->fs_info->block_group_cache;
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
        mutex_lock(&root->fs_info->alloc_mutex);
        while(1) {
-               ret = find_first_extent_bit(block_group_cache, last,
-                                           &start, &end, BLOCK_GROUP_DIRTY);
-               if (ret)
-                       break;
+               cache = NULL;
+               spin_lock(&root->fs_info->block_group_cache_lock);
+               for (n = rb_first(&root->fs_info->block_group_cache_tree);
+                    n; n = rb_next(n)) {
+                       entry = rb_entry(n, struct btrfs_block_group_cache,
+                                        cache_node);
+                       if (entry->dirty) {
+                               cache = entry;
+                               break;
+                       }
+               }
+               spin_unlock(&root->fs_info->block_group_cache_lock);
 
-               last = end + 1;
-               ret = get_state_private(block_group_cache, start, &ptr);
-               if (ret)
+               if (!cache)
                        break;
-               cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
+
+               last += cache->key.offset;
+
                err = write_one_cache_group(trans, root,
                                            path, cache);
                /*
@@ -1214,29 +1242,14 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                        werr = err;
                        continue;
                }
-               clear_extent_bits(block_group_cache, start, end,
-                                 BLOCK_GROUP_DIRTY, GFP_NOFS);
+
+               cache->dirty = 0;
        }
        btrfs_free_path(path);
        mutex_unlock(&root->fs_info->alloc_mutex);
        return werr;
 }
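
Note the locking shape of the loop above: block_group_cache_lock is a spinlock, so it is held only while scanning for the next dirty group and is dropped before write_one_cache_group(), which modifies btree blocks and can block. Each pass restarts from rb_first() because the tree can change while the lock is released. Factored into a helper, the scan step would look like this (illustrative refactor, not in the patch):

/* Sketch of the scan step above: each pass restarts from rb_first() and
 * takes the first dirty group; correctness over speed, since the tree may
 * change while the lock is dropped for the write-out.
 */
static struct btrfs_block_group_cache *
first_dirty_block_group(struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_group_cache *entry, *found = NULL;
        struct rb_node *n;

        spin_lock(&fs_info->block_group_cache_lock);
        for (n = rb_first(&fs_info->block_group_cache_tree); n; n = rb_next(n)) {
                entry = rb_entry(n, struct btrfs_block_group_cache, cache_node);
                if (entry->dirty) {
                        found = entry;
                        break;
                }
        }
        spin_unlock(&fs_info->block_group_cache_lock);
        return found;
}
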
 
-static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
-                                                 u64 flags)
-{
-       struct list_head *head = &info->space_info;
-       struct list_head *cur;
-       struct btrfs_space_info *found;
-       list_for_each(cur, head) {
-               found = list_entry(cur, struct btrfs_space_info, list);
-               if (found->flags == flags)
-                       return found;
-       }
-       return NULL;
-
-}
-
 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
                             u64 total_bytes, u64 bytes_used,
                             struct btrfs_space_info **space_info)
@@ -1256,6 +1269,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
                return -ENOMEM;
 
        list_add(&found->list, &info->space_info);
+       INIT_LIST_HEAD(&found->block_groups);
+       spin_lock_init(&found->lock);
        found->flags = flags;
        found->total_bytes = total_bytes;
        found->bytes_used = bytes_used;
@@ -1318,7 +1333,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
        u64 thresh;
        u64 start;
        u64 num_bytes;
-       int ret;
+       int ret = 0;
 
        flags = reduce_alloc_profile(extent_root, flags);
 
@@ -1355,10 +1370,11 @@ printk("space info full %Lu\n", flags);
        ret = btrfs_make_block_group(trans, extent_root, 0, flags,
                     BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
        BUG_ON(ret);
+
 out_unlock:
        mutex_unlock(&extent_root->fs_info->chunk_mutex);
 out:
-       return 0;
+       return ret;
 }
 
 static int update_block_group(struct btrfs_trans_handle *trans,
@@ -1371,8 +1387,6 @@ static int update_block_group(struct btrfs_trans_handle *trans,
        u64 total = num_bytes;
        u64 old_val;
        u64 byte_in_group;
-       u64 start;
-       u64 end;
 
        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        while(total) {
@@ -1382,12 +1396,9 @@ static int update_block_group(struct btrfs_trans_handle *trans,
                }
                byte_in_group = bytenr - cache->key.objectid;
                WARN_ON(byte_in_group > cache->key.offset);
-               start = cache->key.objectid;
-               end = start + cache->key.offset - 1;
-               set_extent_bits(&info->block_group_cache, start, end,
-                               BLOCK_GROUP_DIRTY, GFP_NOFS);
 
                spin_lock(&cache->lock);
+               cache->dirty = 1;
                old_val = btrfs_block_group_used(&cache->item);
                num_bytes = min(total, cache->key.offset - byte_in_group);
                if (alloc) {
@@ -1401,9 +1412,11 @@ static int update_block_group(struct btrfs_trans_handle *trans,
                        btrfs_set_block_group_used(&cache->item, old_val);
                        spin_unlock(&cache->lock);
                        if (mark_free) {
-                               set_extent_dirty(&info->free_space_cache,
-                                                bytenr, bytenr + num_bytes - 1,
-                                                GFP_NOFS);
+                               int ret;
+                               ret = btrfs_add_free_space(cache, bytenr,
+                                                          num_bytes);
+                               if (ret)
+                                       return -1;
                        }
                }
                total -= num_bytes;
@@ -1414,16 +1427,13 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 
 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
 {
-       u64 start;
-       u64 end;
-       int ret;
-       ret = find_first_extent_bit(&root->fs_info->block_group_cache,
-                                   search_start, &start, &end,
-                                   BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
-                                   BLOCK_GROUP_SYSTEM);
-       if (ret)
+       struct btrfs_block_group_cache *cache;
+
+       cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
+       if (!cache)
                return 0;
-       return start;
+
+       return cache->key.objectid;
 }
 
 
@@ -1501,8 +1511,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
        u64 start;
        u64 end;
        int ret;
-       struct extent_io_tree *free_space_cache;
-       free_space_cache = &root->fs_info->free_space_cache;
+       struct btrfs_block_group_cache *cache;
 
        mutex_lock(&root->fs_info->alloc_mutex);
        while(1) {
@@ -1512,7 +1521,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
                        break;
                btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
                clear_extent_dirty(unpin, start, end, GFP_NOFS);
-               set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
+               cache = btrfs_lookup_block_group(root->fs_info, start);
+               if (cache->cached)
+                       btrfs_add_free_space(cache, start, end - start + 1);
                if (need_resched()) {
                        mutex_unlock(&root->fs_info->alloc_mutex);
                        cond_resched();
@@ -1875,9 +1886,12 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
        /* if metadata always pin */
        if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
                if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+                       struct btrfs_block_group_cache *cache;
+
                        /* btrfs_free_reserved_extent */
-                       set_extent_dirty(&root->fs_info->free_space_cache,
-                                        bytenr, bytenr + num_bytes - 1, GFP_NOFS);
+                       cache = btrfs_lookup_block_group(root->fs_info, bytenr);
+                       BUG_ON(!cache);
+                       btrfs_add_free_space(cache, bytenr, num_bytes);
                        return 0;
                }
                pin = 1;
@@ -1942,8 +1956,6 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
        u64 total_needed = num_bytes;
        u64 *last_ptr = NULL;
        struct btrfs_block_group_cache *block_group;
-       int full_scan = 0;
-       int wrapped = 0;
        int chunk_alloc_done = 0;
        int empty_cluster = 2 * 1024 * 1024;
        int allowed_chunk_alloc = 0;
@@ -1959,9 +1971,9 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
                empty_cluster = 256 * 1024;
        }
 
-       if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
+       if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
                last_ptr = &root->fs_info->last_data_alloc;
-       }
+
        if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
                last_ptr = &root->fs_info->last_log_alloc;
                if (!last_ptr == 0 && root->fs_info->last_alloc) {
@@ -1972,9 +1984,8 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
        if (last_ptr) {
                if (*last_ptr)
                        hint_byte = *last_ptr;
-               else {
+               else
                        empty_size += empty_cluster;
-               }
        }
 
        search_start = max(search_start, first_logical_byte(root, 0));
@@ -1983,145 +1994,172 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
        if (search_end == (u64)-1)
                search_end = btrfs_super_total_bytes(&info->super_copy);
 
-       if (hint_byte) {
-               block_group = btrfs_lookup_first_block_group(info, hint_byte);
-               if (!block_group)
-                       hint_byte = search_start;
-               block_group = btrfs_find_block_group(root, block_group,
-                                                    hint_byte, data, 1);
-               if (last_ptr && *last_ptr == 0 && block_group)
-                       hint_byte = block_group->key.objectid;
-       } else {
-               block_group = btrfs_find_block_group(root,
-                                                    trans->block_group,
-                                                    search_start, data, 1);
-       }
        search_start = max(search_start, hint_byte);
-
        total_needed += empty_size;
 
-check_failed:
-       if (!block_group) {
-               block_group = btrfs_lookup_first_block_group(info,
-                                                            search_start);
-               if (!block_group)
-                       block_group = btrfs_lookup_first_block_group(info,
-                                                      orig_search_start);
-       }
-       if (full_scan && !chunk_alloc_done) {
-               if (allowed_chunk_alloc) {
-                       do_chunk_alloc(trans, root,
-                                      num_bytes + 2 * 1024 * 1024, data, 1);
-                       allowed_chunk_alloc = 0;
-               } else if (block_group && block_group_bits(block_group, data)) {
-                       block_group->space_info->force_alloc = 1;
+new_group:
+       block_group = btrfs_lookup_block_group(info, search_start);
+
+       /*
+        * Ok this looks a little tricky, but it's really simple. First if we
+        * didn't find a block group obviously we want to start over.
+        * Secondly, if the block group we found does not match the type we
+        * need, and we have a last_ptr and it's not 0, chances are the last
+        * allocation we made was at the end of the block group, so let's go
+        * ahead and skip looking through the rest of the block groups and
+        * start at the beginning.  This helps with metadata allocations,
+        * since you are likely to have a bunch of data block groups to search
+        * through first before you realize that you need to start over, so go
+        * ahead and start over and save the time.
+        */
+       if (!block_group || (!block_group_bits(block_group, data) &&
+                            last_ptr && *last_ptr)) {
+               if (search_start != orig_search_start) {
+                       if (last_ptr && *last_ptr)
+                               *last_ptr = 0;
+                       search_start = orig_search_start;
+                       goto new_group;
+               } else if (!chunk_alloc_done && allowed_chunk_alloc) {
+                       ret = do_chunk_alloc(trans, root,
+                                            num_bytes + 2 * 1024 * 1024,
+                                            data, 1);
+                       if (ret < 0) {
+                               struct btrfs_space_info *info;
+
+                               info = __find_space_info(root->fs_info, data);
+                               goto error;
+                       }
+                       BUG_ON(ret);
+                       chunk_alloc_done = 1;
+                       search_start = orig_search_start;
+                       goto new_group;
+               } else {
+                       ret = -ENOSPC;
+                       goto error;
                }
-               chunk_alloc_done = 1;
-       }
-       ret = find_search_start(root, &block_group, &search_start,
-                               total_needed, data);
-       if (ret == -ENOSPC && last_ptr && *last_ptr) {
-               *last_ptr = 0;
-               block_group = btrfs_lookup_first_block_group(info,
-                                                            orig_search_start);
-               search_start = orig_search_start;
-               ret = find_search_start(root, &block_group, &search_start,
-                                       total_needed, data);
        }
-       if (ret == -ENOSPC)
-               goto enospc;
-       if (ret)
-               goto error;
 
-       if (last_ptr && *last_ptr && search_start != *last_ptr) {
-               *last_ptr = 0;
-               if (!empty_size) {
-                       empty_size += empty_cluster;
-                       total_needed += empty_size;
+       /*
+        * this is going to search through all of the existing block groups it
+        * can find, so if we don't find something we need to see if we can
+        * allocate what we need.
+        */
+       ret = find_free_space(root, &block_group, &search_start,
+                             total_needed, data);
+       if (ret == -ENOSPC) {
+               /*
+                * instead of allocating, start at the original search start
+                * and see if there is something to be found, if not then we
+                * allocate
+                */
+               if (search_start != orig_search_start) {
+                       if (last_ptr && *last_ptr) {
+                               *last_ptr = 0;
+                               total_needed += empty_cluster;
+                       }
+                       search_start = orig_search_start;
+                       goto new_group;
                }
-               block_group = btrfs_lookup_first_block_group(info,
-                                                     orig_search_start);
-               search_start = orig_search_start;
-               ret = find_search_start(root, &block_group,
-                                       &search_start, total_needed, data);
-               if (ret == -ENOSPC)
-                       goto enospc;
-               if (ret)
+
+               /*
+                * we've already allocated, we're pretty screwed
+                */
+               if (chunk_alloc_done) {
                        goto error;
+               } else if (!allowed_chunk_alloc && block_group &&
+                          block_group_bits(block_group, data)) {
+                       block_group->space_info->force_alloc = 1;
+                       goto error;
+               } else if (!allowed_chunk_alloc) {
+                       goto error;
+               }
+
+               ret = do_chunk_alloc(trans, root, num_bytes + 2 * 1024 * 1024,
+                                    data, 1);
+               if (ret < 0)
+                       goto error;
+
+               BUG_ON(ret);
+               chunk_alloc_done = 1;
+               if (block_group)
+                       search_start = block_group->key.objectid +
+                               block_group->key.offset;
+               else
+                       search_start = orig_search_start;
+               goto new_group;
        }
 
+       if (ret)
+               goto error;
+
        search_start = stripe_align(root, search_start);
        ins->objectid = search_start;
        ins->offset = num_bytes;
 
-       if (ins->objectid + num_bytes >= search_end)
-               goto enospc;
+       if (ins->objectid + num_bytes >= search_end) {
+               search_start = orig_search_start;
+               if (chunk_alloc_done) {
+                       ret = -ENOSPC;
+                       goto error;
+               }
+               goto new_group;
+       }
 
        if (ins->objectid + num_bytes >
            block_group->key.objectid + block_group->key.offset) {
+               if (search_start == orig_search_start && chunk_alloc_done) {
+                       ret = -ENOSPC;
+                       goto error;
+               }
                search_start = block_group->key.objectid +
                        block_group->key.offset;
                goto new_group;
        }
 
-       if (test_range_bit(&info->extent_ins, ins->objectid,
-                          ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
-               search_start = ins->objectid + num_bytes;
-               goto new_group;
-       }
-
-       if (test_range_bit(&info->pinned_extents, ins->objectid,
-                          ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
-               search_start = ins->objectid + num_bytes;
-               goto new_group;
-       }
-
        if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
            ins->objectid < exclude_start + exclude_nr)) {
                search_start = exclude_start + exclude_nr;
                goto new_group;
        }
 
-       if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
-               block_group = btrfs_lookup_block_group(info, ins->objectid);
-               if (block_group)
-                       trans->block_group = block_group;
-       }
+       if (!(data & BTRFS_BLOCK_GROUP_DATA))
+               trans->block_group = block_group;
+
        ins->offset = num_bytes;
        if (last_ptr) {
                *last_ptr = ins->objectid + ins->offset;
                if (*last_ptr ==
-                   btrfs_super_total_bytes(&root->fs_info->super_copy)) {
+                   btrfs_super_total_bytes(&root->fs_info->super_copy))
                        *last_ptr = 0;
-               }
-       }
-       return 0;
-
-new_group:
-       if (search_start + num_bytes >= search_end) {
-enospc:
-               search_start = orig_search_start;
-               if (full_scan) {
-                       ret = -ENOSPC;
-                       goto error;
-               }
-               if (wrapped) {
-                       if (!full_scan)
-                               total_needed -= empty_size;
-                       full_scan = 1;
-               } else
-                       wrapped = 1;
        }
-       block_group = btrfs_lookup_first_block_group(info, search_start);
-       cond_resched();
-       block_group = btrfs_find_block_group(root, block_group,
-                                            search_start, data, 0);
-       goto check_failed;
 
+       ret = 0;
 error:
        return ret;
 }
 
+static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
+{
+       struct btrfs_block_group_cache *cache;
+       struct list_head *l;
+
+       printk(KERN_INFO "space_info has %Lu free, is %sfull\n",
+              info->total_bytes - info->bytes_used - info->bytes_pinned,
+              (info->full) ? "" : "not ");
+
+       spin_lock(&info->lock);
+       list_for_each(l, &info->block_groups) {
+               cache = list_entry(l, struct btrfs_block_group_cache, list);
+               spin_lock(&cache->lock);
+               printk(KERN_INFO "block group %Lu has %Lu bytes, %Lu used "
+                      "%Lu pinned\n",
+                      cache->key.objectid, cache->key.offset,
+                      btrfs_block_group_used(&cache->item), cache->pinned);
+               btrfs_dump_free_space(cache, bytes);
+               spin_unlock(&cache->lock);
+       }
+       spin_unlock(&info->lock);
+}
 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  u64 num_bytes, u64 min_alloc_size,
@@ -2133,6 +2171,7 @@ static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
        u64 search_start = 0;
        u64 alloc_profile;
        struct btrfs_fs_info *info = root->fs_info;
+       struct btrfs_block_group_cache *cache;
 
        if (data) {
                alloc_profile = info->avail_data_alloc_bits &
@@ -2160,11 +2199,9 @@ again:
                                     BTRFS_BLOCK_GROUP_METADATA |
                                     (info->metadata_alloc_profile &
                                      info->avail_metadata_alloc_bits), 0);
-                       BUG_ON(ret);
                }
                ret = do_chunk_alloc(trans, root->fs_info->extent_root,
                                     num_bytes + 2 * 1024 * 1024, data, 0);
-               BUG_ON(ret);
        }
 
        WARN_ON(num_bytes < root->sectorsize);
@@ -2175,26 +2212,44 @@ again:
 
        if (ret == -ENOSPC && num_bytes > min_alloc_size) {
                num_bytes = num_bytes >> 1;
+               num_bytes = num_bytes & ~(root->sectorsize - 1);
                num_bytes = max(num_bytes, min_alloc_size);
                do_chunk_alloc(trans, root->fs_info->extent_root,
                               num_bytes, data, 1);
                goto again;
        }
        if (ret) {
-               printk("allocation failed flags %Lu\n", data);
+               struct btrfs_space_info *sinfo;
+
+               sinfo = __find_space_info(root->fs_info, data);
+               printk("allocation failed flags %Lu, wanted %Lu\n",
+                      data, num_bytes);
+               dump_space_info(sinfo, num_bytes);
                BUG();
        }
-       clear_extent_dirty(&root->fs_info->free_space_cache,
-                          ins->objectid, ins->objectid + ins->offset - 1,
-                          GFP_NOFS);
-       return 0;
+       cache = btrfs_lookup_block_group(root->fs_info, ins->objectid);
+       if (!cache) {
+               printk(KERN_ERR "Unable to find block group for %Lu\n", ins->objectid);
+               return -ENOSPC;
+       }
+
+       ret = btrfs_remove_free_space(cache, ins->objectid, ins->offset);
+
+       return ret;
 }
 
 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
 {
+       struct btrfs_block_group_cache *cache;
+
        maybe_lock_mutex(root);
-       set_extent_dirty(&root->fs_info->free_space_cache,
-                        start, start + len - 1, GFP_NOFS);
+       cache = btrfs_lookup_block_group(root->fs_info, start);
+       if (!cache) {
+               printk(KERN_ERR "Unable to find block group for %Lu\n", start);
+               maybe_unlock_mutex(root);
+               return -ENOSPC;
+       }
+       btrfs_add_free_space(cache, start, len);
        maybe_unlock_mutex(root);
        return 0;
 }
@@ -2264,8 +2319,8 @@ static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
 
        ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
                                       sizes, 2);
-
        BUG_ON(ret);
+
        extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                     struct btrfs_extent_item);
        btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
@@ -2336,9 +2391,9 @@ int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
        block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
        cache_block_group(root, block_group);
 
-       clear_extent_dirty(&root->fs_info->free_space_cache,
-                          ins->objectid, ins->objectid + ins->offset - 1,
-                          GFP_NOFS);
+       ret = btrfs_remove_free_space(block_group, ins->objectid, ins->offset);
+       BUG_ON(ret);
+
        ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
                                            ref_generation, owner,
                                            owner_offset, ins);
@@ -2843,31 +2898,24 @@ out:
 
 int btrfs_free_block_groups(struct btrfs_fs_info *info)
 {
-       u64 start;
-       u64 end;
-       u64 ptr;
-       int ret;
+       struct btrfs_block_group_cache *block_group;
+       struct rb_node *n;
 
        mutex_lock(&info->alloc_mutex);
-       while(1) {
-               ret = find_first_extent_bit(&info->block_group_cache, 0,
-                                           &start, &end, (unsigned int)-1);
-               if (ret)
-                       break;
-               ret = get_state_private(&info->block_group_cache, start, &ptr);
-               if (!ret)
-                       kfree((void *)(unsigned long)ptr);
-               clear_extent_bits(&info->block_group_cache, start,
-                                 end, (unsigned int)-1, GFP_NOFS);
-       }
-       while(1) {
-               ret = find_first_extent_bit(&info->free_space_cache, 0,
-                                           &start, &end, EXTENT_DIRTY);
-               if (ret)
-                       break;
-               clear_extent_dirty(&info->free_space_cache, start,
-                                  end, GFP_NOFS);
-       }
+       spin_lock(&info->block_group_cache_lock);
+       while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
+               block_group = rb_entry(n, struct btrfs_block_group_cache,
+                                      cache_node);
+
+               btrfs_remove_free_space_cache(block_group);
+               rb_erase(&block_group->cache_node,
+                        &info->block_group_cache_tree);
+               spin_lock(&block_group->space_info->lock);
+               list_del(&block_group->list);
+               spin_unlock(&block_group->space_info->lock);
+               kfree(block_group);
+       }
+       spin_unlock(&info->block_group_cache_lock);
        mutex_unlock(&info->alloc_mutex);
        return 0;
 }
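
Since btrfs_free_block_groups() runs when the whole tree is being discarded, the rb_last()/rb_erase() loop above pays for rebalancing it does not strictly need. Much later kernels grew rbtree_postorder_for_each_entry_safe() for exactly this case; assuming such a kernel, the node-freeing part alone could be written as follows (illustrative only; the free-space cache and space_info list cleanup above would still be required):

/* Illustrative only, assumes a much newer kernel than this patch targets:
 * rbtree_postorder_for_each_entry_safe() visits and frees every node
 * without per-erase rebalancing.
 */
struct btrfs_block_group_cache *bg, *next;

rbtree_postorder_for_each_entry_safe(bg, next,
                                     &info->block_group_cache_tree, cache_node)
        kfree(bg);
info->block_group_cache_tree = RB_ROOT;
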
@@ -3386,7 +3434,6 @@ int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start)
        u64 total_found;
        u64 shrink_last_byte;
        struct btrfs_block_group_cache *shrink_block_group;
-       struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
@@ -3542,15 +3589,17 @@ next:
                goto out;
        }
 
-       clear_extent_bits(&info->block_group_cache, key.objectid,
-                         key.objectid + key.offset - 1,
-                         (unsigned int)-1, GFP_NOFS);
-
-
-       clear_extent_bits(&info->free_space_cache,
-                         key.objectid, key.objectid + key.offset - 1,
-                         (unsigned int)-1, GFP_NOFS);
+       spin_lock(&root->fs_info->block_group_cache_lock);
+       rb_erase(&shrink_block_group->cache_node,
+                &root->fs_info->block_group_cache_tree);
+       spin_unlock(&root->fs_info->block_group_cache_lock);
 
+       ret = btrfs_remove_free_space(shrink_block_group, key.objectid,
+                                     key.offset);
+       if (ret) {
+               btrfs_end_transaction(trans, root);
+               goto out;
+       }
        /*
        memset(shrink_block_group, 0, sizeof(*shrink_block_group));
        kfree(shrink_block_group);
@@ -3566,9 +3615,9 @@ next:
        /* the code to unpin extents might set a few bits in the free
         * space cache for this range again
         */
-       clear_extent_bits(&info->free_space_cache,
-                         key.objectid, key.objectid + key.offset - 1,
-                         (unsigned int)-1, GFP_NOFS);
+       /* XXX? */
+       ret = btrfs_remove_free_space(shrink_block_group, key.objectid,
+                                     key.offset);
 out:
        btrfs_free_path(path);
        mutex_unlock(&root->fs_info->alloc_mutex);
@@ -3616,16 +3665,13 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 {
        struct btrfs_path *path;
        int ret;
-       int bit;
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_space_info *space_info;
-       struct extent_io_tree *block_group_cache;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
 
-       block_group_cache = &info->block_group_cache;
        root = info->extent_root;
        key.objectid = 0;
        key.offset = 0;
@@ -3653,6 +3699,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                }
 
                spin_lock_init(&cache->lock);
+               INIT_LIST_HEAD(&cache->list);
                read_extent_buffer(leaf, &cache->item,
                                   btrfs_item_ptr_offset(leaf, path->slots[0]),
                                   sizeof(cache->item));
@@ -3661,31 +3708,19 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                key.objectid = found_key.objectid + found_key.offset;
                btrfs_release_path(root, path);
                cache->flags = btrfs_block_group_flags(&cache->item);
-               bit = 0;
-               if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
-                       bit = BLOCK_GROUP_DATA;
-               } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
-                       bit = BLOCK_GROUP_SYSTEM;
-               } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
-                       bit = BLOCK_GROUP_METADATA;
-               }
-               set_avail_alloc_bits(info, cache->flags);
 
                ret = update_space_info(info, cache->flags, found_key.offset,
                                        btrfs_block_group_used(&cache->item),
                                        &space_info);
                BUG_ON(ret);
                cache->space_info = space_info;
+               spin_lock(&space_info->lock);
+               list_add(&cache->list, &space_info->block_groups);
+               spin_unlock(&space_info->lock);
+
+               ret = btrfs_add_block_group_cache(root->fs_info, cache);
+               BUG_ON(ret);
 
-               /* use EXTENT_LOCKED to prevent merging */
-               set_extent_bits(block_group_cache, found_key.objectid,
-                               found_key.objectid + found_key.offset - 1,
-                               EXTENT_LOCKED, GFP_NOFS);
-               set_state_private(block_group_cache, found_key.objectid,
-                                 (unsigned long)cache);
-               set_extent_bits(block_group_cache, found_key.objectid,
-                               found_key.objectid + found_key.offset - 1,
-                               bit | EXTENT_LOCKED, GFP_NOFS);
                if (key.objectid >=
                    btrfs_super_total_bytes(&info->super_copy))
                        break;
@@ -3703,22 +3738,22 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
                          u64 size)
 {
        int ret;
-       int bit = 0;
        struct btrfs_root *extent_root;
        struct btrfs_block_group_cache *cache;
-       struct extent_io_tree *block_group_cache;
 
        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        extent_root = root->fs_info->extent_root;
-       block_group_cache = &root->fs_info->block_group_cache;
 
        root->fs_info->last_trans_new_blockgroup = trans->transid;
 
        cache = kzalloc(sizeof(*cache), GFP_NOFS);
-       BUG_ON(!cache);
+       if (!cache)
+               return -ENOMEM;
+
        cache->key.objectid = chunk_offset;
        cache->key.offset = size;
        spin_lock_init(&cache->lock);
+       INIT_LIST_HEAD(&cache->list);
        btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
 
        btrfs_set_block_group_used(&cache->item, bytes_used);
@@ -3729,16 +3764,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
                                &cache->space_info);
        BUG_ON(ret);
+       spin_lock(&cache->space_info->lock);
+       list_add(&cache->list, &cache->space_info->block_groups);
+       spin_unlock(&cache->space_info->lock);
 
-       bit = block_group_state_bits(type);
-       set_extent_bits(block_group_cache, chunk_offset,
-                       chunk_offset + size - 1,
-                       EXTENT_LOCKED, GFP_NOFS);
-       set_state_private(block_group_cache, chunk_offset,
-                         (unsigned long)cache);
-       set_extent_bits(block_group_cache, chunk_offset,
-                       chunk_offset + size - 1,
-                       bit | EXTENT_LOCKED, GFP_NOFS);
+       ret = btrfs_add_block_group_cache(root->fs_info, cache);
+       BUG_ON(ret);
 
        ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
                                sizeof(cache->item));