@@ -144,6 +144,7 @@ struct pool_features {
 	bool zero_new_blocks:1;
 	bool discard_enabled:1;
 	bool discard_passdown:1;
+	bool error_if_no_space:1;
 };

 struct thin_c;
@@ -163,8 +164,7 @@ struct pool {
 	int sectors_per_block_shift;

 	struct pool_features pf;
-	unsigned low_water_triggered:1;	/* A dm event has been sent */
-	unsigned no_free_space:1;	/* A -ENOSPC warning has been issued */
+	bool low_water_triggered:1;	/* A dm event has been sent */

 	struct dm_bio_prison *prison;
 	struct dm_kcopyd_client *copier;
@@ -198,7 +198,8 @@ struct pool {
 };

 static enum pool_mode get_pool_mode(struct pool *pool);
-static void set_pool_mode(struct pool *pool, enum pool_mode mode);
+static void out_of_data_space(struct pool *pool);
+static void metadata_operation_failed(struct pool *pool, const char *op, int r);

 /*
  * Target context for a pool.
@@ -509,15 +510,16 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
 struct dm_thin_new_mapping {
 	struct list_head list;

-	unsigned quiesced:1;
-	unsigned prepared:1;
-	unsigned pass_discard:1;
+	bool quiesced:1;
+	bool prepared:1;
+	bool pass_discard:1;
+	bool definitely_not_shared:1;

+	int err;
 	struct thin_c *tc;
 	dm_block_t virt_block;
 	dm_block_t data_block;
 	struct dm_bio_prison_cell *cell, *cell2;
-	int err;

 	/*
 	 * If the bio covers the whole area of a block then we can avoid
@@ -534,7 +536,7 @@ static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 	struct pool *pool = m->tc->pool;

 	if (m->quiesced && m->prepared) {
-		list_add(&m->list, &pool->prepared_mappings);
+		list_add_tail(&m->list, &pool->prepared_mappings);
 		wake_worker(pool);
 	}
 }
@@ -548,7 +550,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 	m->err = read_err || write_err ? -EIO : 0;

 	spin_lock_irqsave(&pool->lock, flags);
-	m->prepared = 1;
+	m->prepared = true;
 	__maybe_add_mapping(m);
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
@@ -563,7 +565,7 @@ static void overwrite_endio(struct bio *bio, int err)
 	m->err = err;

 	spin_lock_irqsave(&pool->lock, flags);
-	m->prepared = 1;
+	m->prepared = true;
 	__maybe_add_mapping(m);
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
@@ -640,9 +642,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	 */
 	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
 	if (r) {
-		DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d",
-			    dm_device_name(pool->pool_md), r);
-		set_pool_mode(pool, PM_READ_ONLY);
+		metadata_operation_failed(pool, "dm_thin_insert_block", r);
 		cell_error(pool, m->cell);
 		goto out;
 	}
@@ -683,7 +683,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
 	cell_defer_no_holder(tc, m->cell2);

 	if (m->pass_discard)
-		remap_and_issue(tc, m->bio, m->data_block);
+		if (m->definitely_not_shared)
+			remap_and_issue(tc, m->bio, m->data_block);
+		else {
+			bool used = false;
+			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
+				bio_endio(m->bio, 0);
+			else
+				remap_and_issue(tc, m->bio, m->data_block);
+		}
 	else
 		bio_endio(m->bio, 0);

@@ -751,13 +759,17 @@ static int ensure_next_mapping(struct pool *pool)

 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 {
-	struct dm_thin_new_mapping *r = pool->next_mapping;
+	struct dm_thin_new_mapping *m = pool->next_mapping;

 	BUG_ON(!pool->next_mapping);

+	memset(m, 0, sizeof(struct dm_thin_new_mapping));
+	INIT_LIST_HEAD(&m->list);
+	m->bio = NULL;
+
 	pool->next_mapping = NULL;

-	return r;
+	return m;
 }

 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
@@ -769,18 +781,13 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);

-	INIT_LIST_HEAD(&m->list);
-	m->quiesced = 0;
-	m->prepared = 0;
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_dest;
 	m->cell = cell;
-	m->err = 0;
-	m->bio = NULL;

 	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
-		m->quiesced = 1;
+		m->quiesced = true;

 	/*
 	 * IO to pool_dev remaps to the pool target's data_dev.
@@ -840,15 +847,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);

-	INIT_LIST_HEAD(&m->list);
-	m->quiesced = 1;
-	m->prepared = 0;
+	m->quiesced = true;
+	m->prepared = false;
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_block;
 	m->cell = cell;
-	m->err = 0;
-	m->bio = NULL;

 	/*
 	 * If the whole block of data is being overwritten or we are not
@@ -895,41 +899,42 @@ static int commit(struct pool *pool)
 		return -EINVAL;

 	r = dm_pool_commit_metadata(pool->pmd);
-	if (r) {
-		DMERR_LIMIT("%s: dm_pool_commit_metadata failed: error = %d",
-			    dm_device_name(pool->pool_md), r);
-		set_pool_mode(pool, PM_READ_ONLY);
-	}
+	if (r)
+		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

 	return r;
 }

-static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
+static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
 {
-	int r;
-	dm_block_t free_blocks;
 	unsigned long flags;
-	struct pool *pool = tc->pool;
-
-	/*
-	 * Once no_free_space is set we must not allow allocation to succeed.
-	 * Otherwise it is difficult to explain, debug, test and support.
-	 */
-	if (pool->no_free_space)
-		return -ENOSPC;
-
-	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
-	if (r)
-		return r;

 	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
 		DMWARN("%s: reached low water mark for data device: sending event.",
 		       dm_device_name(pool->pool_md));
 		spin_lock_irqsave(&pool->lock, flags);
-		pool->low_water_triggered = 1;
+		pool->low_water_triggered = true;
 		spin_unlock_irqrestore(&pool->lock, flags);
 		dm_table_event(pool->ti->table);
 	}
+}
+
+static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
+{
+	int r;
+	dm_block_t free_blocks;
+	struct pool *pool = tc->pool;
+
+	if (get_pool_mode(pool) != PM_WRITE)
+		return -EINVAL;
+
+	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
+	if (r) {
+		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
+		return r;
+	}
+
+	check_low_water_mark(pool, free_blocks);

 	if (!free_blocks) {
 		/*
@@ -941,35 +946,20 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 			return r;

 		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
-		if (r)
+		if (r) {
+			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
 			return r;
+		}

-		/*
-		 * If we still have no space we set a flag to avoid
-		 * doing all this checking and return -ENOSPC. This
-		 * flag serves as a latch that disallows allocations from
-		 * this pool until the admin takes action (e.g. resize or
-		 * table reload).
-		 */
 		if (!free_blocks) {
-			DMWARN("%s: no free data space available.",
-			       dm_device_name(pool->pool_md));
-			spin_lock_irqsave(&pool->lock, flags);
-			pool->no_free_space = 1;
-			spin_unlock_irqrestore(&pool->lock, flags);
+			out_of_data_space(pool);
 			return -ENOSPC;
 		}
 	}

 	r = dm_pool_alloc_data_block(pool->pmd, result);
 	if (r) {
-		if (r == -ENOSPC &&
-		    !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
-		    !free_blocks) {
-			DMWARN("%s: no free metadata space available.",
-			       dm_device_name(pool->pool_md));
-			set_pool_mode(pool, PM_READ_ONLY);
-		}
+		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
 		return r;
 	}

@@ -992,7 +982,21 @@ static void retry_on_resume(struct bio *bio)
 	spin_unlock_irqrestore(&pool->lock, flags);
 }

-static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
+static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
+{
+	/*
+	 * When pool is read-only, no cell locking is needed because
+	 * nothing is changing.
+	 */
+	WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);
+
+	if (pool->pf.error_if_no_space)
+		bio_io_error(bio);
+	else
+		retry_on_resume(bio);
+}
+
+static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
 	struct bio *bio;
 	struct bio_list bios;
@@ -1001,7 +1005,7 @@ static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
 	cell_release(pool, cell, &bios);

 	while ((bio = bio_list_pop(&bios)))
-		retry_on_resume(bio);
+		handle_unserviceable_bio(pool, bio);
 }

 static void process_discard(struct thin_c *tc, struct bio *bio)
@@ -1040,17 +1044,17 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 			 */
 			m = get_next_mapping(pool);
 			m->tc = tc;
-			m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
+			m->pass_discard = pool->pf.discard_passdown;
+			m->definitely_not_shared = !lookup_result.shared;
 			m->virt_block = block;
 			m->data_block = lookup_result.block;
 			m->cell = cell;
 			m->cell2 = cell2;
-			m->err = 0;
 			m->bio = bio;

 			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
 				spin_lock_irqsave(&pool->lock, flags);
-				list_add(&m->list, &pool->prepared_discards);
+				list_add_tail(&m->list, &pool->prepared_discards);
 				spin_unlock_irqrestore(&pool->lock, flags);
 				wake_worker(pool);
 			}
@@ -1105,13 +1109,12 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 		break;

 	case -ENOSPC:
-		no_space(pool, cell);
+		retry_bios_on_resume(pool, cell);
 		break;

 	default:
 		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
 			    __func__, r);
-		set_pool_mode(pool, PM_READ_ONLY);
 		cell_error(pool, cell);
 		break;
 	}
@@ -1184,13 +1187,12 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 		break;

 	case -ENOSPC:
-		no_space(pool, cell);
+		retry_bios_on_resume(pool, cell);
 		break;

 	default:
 		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
 			    __func__, r);
-		set_pool_mode(pool, PM_READ_ONLY);
 		cell_error(pool, cell);
 		break;
 	}
@@ -1257,7 +1259,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
 	switch (r) {
 	case 0:
 		if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
-			bio_io_error(bio);
+			handle_unserviceable_bio(tc->pool, bio);
 		else {
 			inc_all_io_entry(tc->pool, bio);
 			remap_and_issue(tc, bio, lookup_result.block);
@@ -1266,7 +1268,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)

 	case -ENODATA:
 		if (rw != READ) {
-			bio_io_error(bio);
+			handle_unserviceable_bio(tc->pool, bio);
 			break;
 		}

@@ -1390,16 +1392,16 @@ static enum pool_mode get_pool_mode(struct pool *pool)
 	return pool->pf.mode;
 }

-static void set_pool_mode(struct pool *pool, enum pool_mode mode)
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 {
 	int r;
+	enum pool_mode old_mode = pool->pf.mode;

-	pool->pf.mode = mode;
-
-	switch (mode) {
+	switch (new_mode) {
 	case PM_FAIL:
-		DMERR("%s: switching pool to failure mode",
-		      dm_device_name(pool->pool_md));
+		if (old_mode != new_mode)
+			DMERR("%s: switching pool to failure mode",
+			      dm_device_name(pool->pool_md));
 		dm_pool_metadata_read_only(pool->pmd);
 		pool->process_bio = process_bio_fail;
 		pool->process_discard = process_bio_fail;
@@ -1408,13 +1410,15 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
 		break;

 	case PM_READ_ONLY:
-		DMERR("%s: switching pool to read-only mode",
-		      dm_device_name(pool->pool_md));
+		if (old_mode != new_mode)
+			DMERR("%s: switching pool to read-only mode",
+			      dm_device_name(pool->pool_md));
 		r = dm_pool_abort_metadata(pool->pmd);
 		if (r) {
 			DMERR("%s: aborting transaction failed",
 			      dm_device_name(pool->pool_md));
-			set_pool_mode(pool, PM_FAIL);
+			new_mode = PM_FAIL;
+			set_pool_mode(pool, new_mode);
 		} else {
 			dm_pool_metadata_read_only(pool->pmd);
 			pool->process_bio = process_bio_read_only;
@@ -1425,6 +1429,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
 		break;

 	case PM_WRITE:
+		if (old_mode != new_mode)
+			DMINFO("%s: switching pool to write mode",
+			       dm_device_name(pool->pool_md));
 		dm_pool_metadata_read_write(pool->pmd);
 		pool->process_bio = process_bio;
 		pool->process_discard = process_discard;
@@ -1432,6 +1439,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
 		pool->process_prepared_discard = process_prepared_discard;
 		break;
 	}
+
+	pool->pf.mode = new_mode;
+}
+
+/*
+ * Rather than calling set_pool_mode directly, use these which describe the
+ * reason for mode degradation.
+ */
+static void out_of_data_space(struct pool *pool)
+{
+	DMERR_LIMIT("%s: no free data space available.",
+		    dm_device_name(pool->pool_md));
+	set_pool_mode(pool, PM_READ_ONLY);
+}
+
+static void metadata_operation_failed(struct pool *pool, const char *op, int r)
+{
+	dm_block_t free_blocks;
+
+	DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
+		    dm_device_name(pool->pool_md), op, r);
+
+	if (r == -ENOSPC &&
+	    !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
+	    !free_blocks)
+		DMERR_LIMIT("%s: no free metadata space available.",
+			    dm_device_name(pool->pool_md));
+
+	set_pool_mode(pool, PM_READ_ONLY);
 }

 /*----------------------------------------------------------------*/
@@ -1538,9 +1574,9 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
 			/*
 			 * This block isn't provisioned, and we have no way
-			 * of doing so. Just error it.
+			 * of doing so.
 			 */
-			bio_io_error(bio);
+			handle_unserviceable_bio(tc->pool, bio);
 			return DM_MAPIO_SUBMITTED;
 		}
 		/* fall through */
@@ -1647,6 +1683,17 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
 	enum pool_mode old_mode = pool->pf.mode;
 	enum pool_mode new_mode = pt->adjusted_pf.mode;

+	/*
+	 * Don't change the pool's mode until set_pool_mode() below.
+	 * Otherwise the pool's process_* function pointers may
+	 * not match the desired pool mode.
+	 */
+	pt->adjusted_pf.mode = old_mode;
+
+	pool->ti = ti;
+	pool->pf = pt->adjusted_pf;
+	pool->low_water_blocks = pt->low_water_blocks;
+
 	/*
 	 * If we were in PM_FAIL mode, rollback of metadata failed. We're
 	 * not going to recover without a thin_repair. So we never let the
@@ -1657,10 +1704,6 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
 	if (old_mode == PM_FAIL)
 		new_mode = old_mode;

-	pool->ti = ti;
-	pool->low_water_blocks = pt->low_water_blocks;
-	pool->pf = pt->adjusted_pf;
-
 	set_pool_mode(pool, new_mode);

 	return 0;
@@ -1682,6 +1725,7 @@ static void pool_features_init(struct pool_features *pf)
 	pf->zero_new_blocks = true;
 	pf->discard_enabled = true;
 	pf->discard_passdown = true;
+	pf->error_if_no_space = false;
 }

 static void __pool_destroy(struct pool *pool)
@@ -1772,8 +1816,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	bio_list_init(&pool->deferred_flush_bios);
 	INIT_LIST_HEAD(&pool->prepared_mappings);
 	INIT_LIST_HEAD(&pool->prepared_discards);
-	pool->low_water_triggered = 0;
-	pool->no_free_space = 0;
+	pool->low_water_triggered = false;
 	bio_list_init(&pool->retry_on_resume_list);

 	pool->shared_read_ds = dm_deferred_set_create();
@@ -1898,7 +1941,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
 	const char *arg_name;

 	static struct dm_arg _args[] = {
-		{0, 3, "Invalid number of pool feature arguments"},
+		{0, 4, "Invalid number of pool feature arguments"},
 	};

 	/*
@@ -1927,6 +1970,9 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
 		else if (!strcasecmp(arg_name, "read_only"))
 			pf->mode = PM_READ_ONLY;

+		else if (!strcasecmp(arg_name, "error_if_no_space"))
+			pf->error_if_no_space = true;
+
 		else {
 			ti->error = "Unrecognised pool feature requested";
 			r = -EINVAL;
@@ -1997,6 +2043,8 @@ static dm_block_t calc_metadata_threshold(struct pool_c *pt)
  *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
  *	     ignore_discard: disable discard
  *	     no_discard_passdown: don't pass discards down to the data device
+ *	     read_only: Don't allow any changes to be made to the pool metadata.
+ *	     error_if_no_space: error IOs, instead of queueing, if no space.
  */
 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
@@ -2192,11 +2240,13 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
 		return -EINVAL;

 	} else if (data_size > sb_data_size) {
+		if (sb_data_size)
+			DMINFO("%s: growing the data device from %llu to %llu blocks",
+			       dm_device_name(pool->pool_md),
+			       sb_data_size, (unsigned long long)data_size);
 		r = dm_pool_resize_data_dev(pool->pmd, data_size);
 		if (r) {
-			DMERR("%s: failed to resize data device",
-			      dm_device_name(pool->pool_md));
-			set_pool_mode(pool, PM_READ_ONLY);
+			metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
 			return r;
 		}

@@ -2231,10 +2281,12 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
 		return -EINVAL;

 	} else if (metadata_dev_size > sb_metadata_dev_size) {
+		DMINFO("%s: growing the metadata device from %llu to %llu blocks",
+		       dm_device_name(pool->pool_md),
+		       sb_metadata_dev_size, metadata_dev_size);
 		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
 		if (r) {
-			DMERR("%s: failed to resize metadata device",
-			      dm_device_name(pool->pool_md));
+			metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
 			return r;
 		}

@@ -2290,8 +2342,7 @@ static void pool_resume(struct dm_target *ti)
 	unsigned long flags;

 	spin_lock_irqsave(&pool->lock, flags);
-	pool->low_water_triggered = 0;
-	pool->no_free_space = 0;
+	pool->low_water_triggered = false;
 	__requeue_bios(pool);
 	spin_unlock_irqrestore(&pool->lock, flags);

@@ -2510,7 +2561,8 @@ static void emit_flags(struct pool_features *pf, char *result,
 		       unsigned sz, unsigned maxlen)
 {
 	unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
-		!pf->discard_passdown + (pf->mode == PM_READ_ONLY);
+		!pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
+		pf->error_if_no_space;
 	DMEMIT("%u ", count);

 	if (!pf->zero_new_blocks)
@@ -2524,6 +2576,9 @@ static void emit_flags(struct pool_features *pf, char *result,

 	if (pf->mode == PM_READ_ONLY)
 		DMEMIT("read_only ");
+
+	if (pf->error_if_no_space)
+		DMEMIT("error_if_no_space ");
 }

 /*
@@ -2618,11 +2673,16 @@ static void pool_status(struct dm_target *ti, status_type_t type,
 			DMEMIT("rw ");

 		if (!pool->pf.discard_enabled)
-			DMEMIT("ignore_discard");
+			DMEMIT("ignore_discard ");
 		else if (pool->pf.discard_passdown)
-			DMEMIT("discard_passdown");
+			DMEMIT("discard_passdown ");
+		else
+			DMEMIT("no_discard_passdown ");
+
+		if (pool->pf.error_if_no_space)
+			DMEMIT("error_if_no_space ");
 		else
-			DMEMIT("no_discard_passdown");
+			DMEMIT("queue_if_no_space ");

 		break;

@@ -2721,7 +2781,7 @@ static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 9, 0},
+	.version = {1, 10, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -2899,7 +2959,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
 		spin_lock_irqsave(&pool->lock, flags);
 		list_for_each_entry_safe(m, tmp, &work, list) {
 			list_del(&m->list);
-			m->quiesced = 1;
+			m->quiesced = true;
 			__maybe_add_mapping(m);
 		}
 		spin_unlock_irqrestore(&pool->lock, flags);
@@ -2911,7 +2971,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
 	if (!list_empty(&work)) {
 		spin_lock_irqsave(&pool->lock, flags);
 		list_for_each_entry_safe(m, tmp, &work, list)
-			list_add(&m->list, &pool->prepared_discards);
+			list_add_tail(&m->list, &pool->prepared_discards);
 		spin_unlock_irqrestore(&pool->lock, flags);
 		wake_worker(pool);
 	}
@@ -3008,7 +3068,7 @@ static int thin_iterate_devices(struct dm_target *ti,

 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 9, 0},
+	.version = {1, 10, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,