@@ -334,26 +334,55 @@ static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
 		(b * pool->sectors_per_block);
 }
 
-static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e,
-			 struct bio *parent_bio)
+/*----------------------------------------------------------------*/
+
+struct discard_op {
+	struct thin_c *tc;
+	struct blk_plug plug;
+	struct bio *parent_bio;
+	struct bio *bio;
+};
+
+static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
+{
+	BUG_ON(!parent);
+
+	op->tc = tc;
+	blk_start_plug(&op->plug);
+	op->parent_bio = parent;
+	op->bio = NULL;
+}
+
+static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
 {
-	int type = REQ_WRITE | REQ_DISCARD;
+	struct thin_c *tc = op->tc;
 	sector_t s = block_to_sectors(tc->pool, data_b);
 	sector_t len = block_to_sectors(tc->pool, data_e - data_b);
-	struct bio *bio = NULL;
-	struct blk_plug plug;
-	int ret;
 
-	blk_start_plug(&plug);
-	ret = __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
-				     GFP_NOWAIT, type, &bio);
-	if (!ret && bio) {
-		bio_chain(bio, parent_bio);
-		submit_bio(type, bio);
+	return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
+				      GFP_NOWAIT, REQ_WRITE | REQ_DISCARD, &op->bio);
+}
+
+static void end_discard(struct discard_op *op, int r)
+{
+	if (op->bio) {
+		/*
+		 * Even if one of the calls to issue_discard failed, we
+		 * need to wait for the chain to complete.
+		 */
+		bio_chain(op->bio, op->parent_bio);
+		submit_bio(REQ_WRITE | REQ_DISCARD, op->bio);
 	}
-	blk_finish_plug(&plug);
 
-	return ret;
+	blk_finish_plug(&op->plug);
+
+	/*
+	 * Even if r is set, there could be sub discards in flight that we
+	 * need to wait for.
+	 */
+	if (r && !op->parent_bio->bi_error)
+		op->parent_bio->bi_error = r;
+	bio_endio(op->parent_bio);
 }
 
 /*----------------------------------------------------------------*/
@@ -968,24 +997,28 @@ static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
 	mempool_free(m, tc->pool->mapping_pool);
 }
 
-static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
+/*----------------------------------------------------------------*/
+
+static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
 {
 	/*
 	 * We've already unmapped this range of blocks, but before we
 	 * passdown we have to check that these blocks are now unused.
 	 */
-	int r;
+	int r = 0;
 	bool used = true;
 	struct thin_c *tc = m->tc;
 	struct pool *pool = tc->pool;
 	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
+	struct discard_op op;
 
+	begin_discard(&op, tc, m->bio);
 	while (b != end) {
 		/* find start of unmapped run */
 		for (; b < end; b++) {
 			r = dm_pool_block_is_used(pool->pmd, b, &used);
 			if (r)
-				return r;
+				goto out;
 
 			if (!used)
 				break;
@@ -998,20 +1031,20 @@ static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
 		for (e = b + 1; e != end; e++) {
 			r = dm_pool_block_is_used(pool->pmd, e, &used);
 			if (r)
-				return r;
+				goto out;
 
 			if (used)
 				break;
 		}
 
-		r = issue_discard(tc, b, e, m->bio);
+		r = issue_discard(&op, b, e);
 		if (r)
-			return r;
+			goto out;
 
 		b = e;
 	}
-
-	return 0;
+out:
+	end_discard(&op, r);
 }
 
 static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
@@ -1021,20 +1054,21 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
 	struct pool *pool = tc->pool;
 
 	r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
-	if (r)
+	if (r) {
 		metadata_operation_failed(pool, "dm_thin_remove_range", r);
+		bio_io_error(m->bio);
 
-	else if (m->maybe_shared)
-		r = passdown_double_checking_shared_status(m);
-	else
-		r = issue_discard(tc, m->data_block, m->data_block + (m->virt_end - m->virt_begin), m->bio);
+	} else if (m->maybe_shared) {
+		passdown_double_checking_shared_status(m);
+
+	} else {
+		struct discard_op op;
+		begin_discard(&op, tc, m->bio);
+		r = issue_discard(&op, m->data_block,
+				  m->data_block + (m->virt_end - m->virt_begin));
+		end_discard(&op, r);
+	}
 
-	/*
-	 * Even if r is set, there could be sub discards in flight that we
-	 * need to wait for.
-	 */
-	m->bio->bi_error = r;
-	bio_endio(m->bio);
 	cell_defer_no_holder(tc, m->cell);
 	mempool_free(m, pool->mapping_pool);
 }
@@ -1505,11 +1539,11 @@ static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t
 
 		/*
 		 * The parent bio must not complete before sub discard bios are
-		 * chained to it (see issue_discard's bio_chain)!
+		 * chained to it (see end_discard's bio_chain)!
 		 *
 		 * This per-mapping bi_remaining increment is paired with
 		 * the implicit decrement that occurs via bio_endio() in
-		 * process_prepared_discard_passdown().
+		 * end_discard().
 		 */
 		bio_inc_remaining(bio);
 		if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
@@ -3850,7 +3884,7 @@ static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 18, 0},
+	.version = {1, 19, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -4224,7 +4258,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 18, 0},
+	.version = {1, 19, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,