@@ -315,12 +315,6 @@ get_caching_control(struct btrfs_block_group_cache *cache)
 	struct btrfs_caching_control *ctl;
 
 	spin_lock(&cache->lock);
-	if (cache->cached != BTRFS_CACHE_STARTED) {
-		spin_unlock(&cache->lock);
-		return NULL;
-	}
-
-	/* We're loading it the fast way, so we don't have a caching_ctl. */
 	if (!cache->caching_ctl) {
 		spin_unlock(&cache->lock);
 		return NULL;
@@ -594,6 +588,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	spin_unlock(&cache->lock);
 
 	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
+		mutex_lock(&caching_ctl->mutex);
 		ret = load_free_space_cache(fs_info, cache);
 
 		spin_lock(&cache->lock);
@@ -601,6 +596,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 		cache->caching_ctl = NULL;
 		cache->cached = BTRFS_CACHE_FINISHED;
 		cache->last_byte_to_unpin = (u64)-1;
+		caching_ctl->progress = (u64)-1;
 	} else {
 		if (load_cache_only) {
 			cache->caching_ctl = NULL;
@@ -611,6 +607,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 		}
 	}
 	spin_unlock(&cache->lock);
+	mutex_unlock(&caching_ctl->mutex);
+
 	wake_up(&caching_ctl->wait);
 	if (ret == 1) {
 		put_caching_control(caching_ctl);
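
For reference, the waiter side of this handshake looks roughly like the sketch below. It is not part of the patch: the function name is illustrative, and the exact wait condition is an assumption based on the caching_ctl->wait / wake_up() pairing visible in the hunks above.

/*
 * Sketch only (not from the patch): a caller that needs the block
 * group fully cached takes a reference on the caching control,
 * sleeps on its wait queue until the cached state reaches
 * BTRFS_CACHE_FINISHED, then drops the reference.
 */
static int wait_block_group_cached(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;	/* no caching in flight, nothing to wait for */

	wait_event(caching_ctl->wait,
		   cache->cached == BTRFS_CACHE_FINISHED);
	put_caching_control(caching_ctl);
	return 0;
}

With caching_ctl->mutex now held across load_free_space_cache(), such a waiter cannot observe a half-loaded block group: wake_up(&caching_ctl->wait) runs only after cache->cached and caching_ctl->progress have been published and the mutex dropped.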