@@ -1936,6 +1936,14 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_SUBMITTED;
 	}
 
+	/*
+	 * We must hold the virtual cell before doing the lookup, otherwise
+	 * there's a race with discard.
+	 */
+	build_virtual_key(tc->td, block, &key);
+	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
+		return DM_MAPIO_SUBMITTED;
+
 	r = dm_thin_find_block(td, block, 0, &result);
 
 	/*
@@ -1959,13 +1967,10 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 			 * shared flag will be set in their case.
 			 */
 			thin_defer_bio(tc, bio);
+			cell_defer_no_holder_no_free(tc, &cell1);
 			return DM_MAPIO_SUBMITTED;
 		}
 
-		build_virtual_key(tc->td, block, &key);
-		if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
-			return DM_MAPIO_SUBMITTED;
-
 		build_data_key(tc->td, result.block, &key);
 		if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
 			cell_defer_no_holder_no_free(tc, &cell1);
@@ -1986,6 +1991,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 			 * of doing so.
 			 */
 			handle_unserviceable_bio(tc->pool, bio);
+			cell_defer_no_holder_no_free(tc, &cell1);
 			return DM_MAPIO_SUBMITTED;
 		}
 		/* fall through */
@@ -1996,6 +2002,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		 * provide the hint to load the metadata into cache.
 		 */
 		thin_defer_bio(tc, bio);
+		cell_defer_no_holder_no_free(tc, &cell1);
 		return DM_MAPIO_SUBMITTED;
 
 	default:
@@ -2005,6 +2012,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		 * pool is switched to fail-io mode.
 		 */
 		bio_io_error(bio);
+		cell_defer_no_holder_no_free(tc, &cell1);
 		return DM_MAPIO_SUBMITTED;
 	}
 }
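
The change reads as a lock-ordering fix: the virtual cell (a per-block lock in the dm bio prison) is now taken before dm_thin_find_block(), so the mapping cannot be discarded between the lookup and the remap, and every early return taken while holding the cell must now release it. Below is a minimal user-space sketch of that pattern, not the real dm-bio-prison API: all toy_* names are hypothetical stand-ins, and the single-slot "prison" only models the detain-before-lookup ordering, not key hashing or waiter queues.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the bio prison: one mutex plus one "detained" slot. */
struct toy_prison {
	pthread_mutex_t lock;
	bool busy;
	unsigned long block;
};

/*
 * Returns true if a cell is already held (caller must defer), false if
 * we took it.  The real prison hashes keys to many cells and queues the
 * losing bios; this single slot only models the mutual exclusion.
 */
static bool toy_detain(struct toy_prison *p, unsigned long block)
{
	bool collision;

	pthread_mutex_lock(&p->lock);
	collision = p->busy;
	if (!collision) {
		p->busy = true;
		p->block = block;
	}
	pthread_mutex_unlock(&p->lock);
	return collision;
}

static void toy_release(struct toy_prison *p)
{
	pthread_mutex_lock(&p->lock);
	p->busy = false;
	pthread_mutex_unlock(&p->lock);
}

/* Placeholder lookup: pretend even-numbered blocks are mapped. */
static int toy_find_block(unsigned long block)
{
	return (block % 2 == 0) ? 0 : -1;	/* -1 plays -ENODATA */
}

/* Mirrors the patched thin_bio_map(): detain, then look up. */
static int toy_map(struct toy_prison *p, unsigned long block)
{
	int r;

	/*
	 * Detain first, as the patch does, so a concurrent discard
	 * cannot invalidate the mapping between lookup and remap.
	 */
	if (toy_detain(p, block))
		return 1;	/* cell held elsewhere: defer the bio */

	r = toy_find_block(block);
	if (r < 0) {
		toy_release(p);	/* every early exit must release */
		return r;
	}

	/* ...remap and submit here... */
	toy_release(p);
	return 0;
}

int main(void)
{
	struct toy_prison p = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

	printf("block 4: %d\n", toy_map(&p, 4));	/* mapped: 0 */
	printf("block 5: %d\n", toy_map(&p, 5));	/* unmapped: -1 */
	return 0;
}

The point the patch enforces is visible in toy_map(): the lookup result is only trustworthy while the cell is held, and the cost of moving the detain earlier is that every DM_MAPIO_SUBMITTED path must release it, which is exactly why the patch adds a cell_defer_no_holder_no_free() before each early return.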