@@ -1094,6 +1094,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
 		return;
 	}
 
+	/*
+	 * Increment the unmapped blocks.  This prevents a race between the
+	 * passdown io and reallocation of freed blocks.
+	 */
+	r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
+	if (r) {
+		metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
+		bio_io_error(m->bio);
+		cell_defer_no_holder(tc, m->cell);
+		mempool_free(m, pool->mapping_pool);
+		return;
+	}
+
 	discard_parent = bio_alloc(GFP_NOIO, 1);
 	if (!discard_parent) {
 		DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
@@ -1114,19 +1127,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
 			end_discard(&op, r);
 		}
 	}
-
-	/*
-	 * Increment the unmapped blocks.  This prevents a race between the
-	 * passdown io and reallocation of freed blocks.
-	 */
-	r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
-	if (r) {
-		metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
-		bio_io_error(m->bio);
-		cell_defer_no_holder(tc, m->cell);
-		mempool_free(m, pool->mapping_pool);
-		return;
-	}
 }
 
 static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)