@@ -1127,6 +1127,24 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
 		schedule_zero(tc, virt_block, data_dest, cell, bio);
 }
 
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
+static void check_for_space(struct pool *pool)
+{
+	int r;
+	dm_block_t nr_free;
+
+	if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
+		return;
+
+	r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
+	if (r)
+		return;
+
+	if (nr_free)
+		set_pool_mode(pool, PM_WRITE);
+}
+
 /*
  * A non-zero return indicates read_only or fail_io mode.
  * Many callers don't care about the return value.
@@ -1141,6 +1159,8 @@ static int commit(struct pool *pool)
 	r = dm_pool_commit_metadata(pool->pmd);
 	if (r)
 		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
+	else
+		check_for_space(pool);
 
 	return r;
 }
@@ -1159,8 +1179,6 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
 	}
 }
 
-static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
-
 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 {
 	int r;
@@ -2155,7 +2173,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		pool->process_cell = process_cell_read_only;
 		pool->process_discard_cell = process_discard_cell;
 		pool->process_prepared_mapping = process_prepared_mapping;
-		pool->process_prepared_discard = process_prepared_discard_passdown;
+		pool->process_prepared_discard = process_prepared_discard;
 
 		if (!pool->pf.error_if_no_space && no_space_timeout)
 			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
@@ -3814,6 +3832,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		r = -EINVAL;
 		goto bad;
 	}
+	atomic_set(&tc->refcount, 1);
+	init_completion(&tc->can_destroy);
 	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
 	spin_unlock_irqrestore(&tc->pool->lock, flags);
 	/*
@@ -3826,9 +3846,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	dm_put(pool_md);
 
-	atomic_set(&tc->refcount, 1);
-	init_completion(&tc->can_destroy);
-
 	return 0;
 
 bad: