@@ -910,6 +910,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
 	}
 }
 
+void disable_discard(struct mapped_device *md)
+{
+	struct queue_limits *limits = dm_get_queue_limits(md);
+
+	/* device doesn't really support DISCARD, disable it */
+	limits->max_discard_sectors = 0;
+	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
 void disable_write_same(struct mapped_device *md)
 {
 	struct queue_limits *limits = dm_get_queue_limits(md);
@@ -935,11 +944,14 @@ static void clone_endio(struct bio *bio)
 	dm_endio_fn endio = tio->ti->type->end_io;
 
 	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
-		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-		    !bio->bi_disk->queue->limits.max_write_same_sectors)
+		if (bio_op(bio) == REQ_OP_DISCARD &&
+		    !bio->bi_disk->queue->limits.max_discard_sectors)
+			disable_discard(md);
+		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+		    !bio->bi_disk->queue->limits.max_write_same_sectors)
 			disable_write_same(md);
-		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
 			disable_write_zeroes(md);
 	}
 
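Note (not part of the patch): the checks in clone_endio() now form an else-if chain, which makes explicit that bio_op() yields exactly one opcode, so at most one disable helper can fire; the old code re-tested WRITE ZEROES even after a WRITE SAME match. As a review aid, here is a minimal userspace sketch of that dispatch. enum req_op, struct fake_limits, and fallback_for() are simplified stand-ins invented for this sketch; only the branch structure mirrors the patched clone_endio().

#include <stdio.h>

enum req_op {
	REQ_OP_DISCARD,
	REQ_OP_WRITE_SAME,
	REQ_OP_WRITE_ZEROES,
};

struct fake_limits {
	unsigned int max_discard_sectors;
	unsigned int max_write_same_sectors;
	unsigned int max_write_zeroes_sectors;
};

/* Name of the disable helper the patched chain would call, or NULL. */
static const char *fallback_for(enum req_op op, const struct fake_limits *l)
{
	if (op == REQ_OP_DISCARD && !l->max_discard_sectors)
		return "disable_discard";
	else if (op == REQ_OP_WRITE_SAME && !l->max_write_same_sectors)
		return "disable_write_same";
	else if (op == REQ_OP_WRITE_ZEROES && !l->max_write_zeroes_sectors)
		return "disable_write_zeroes";
	return NULL;
}

int main(void)
{
	/* A device that stopped advertising DISCARD but still handles the rest. */
	struct fake_limits l = {
		.max_discard_sectors = 0,
		.max_write_same_sectors = 8,
		.max_write_zeroes_sectors = 8,
	};
	const char *d = fallback_for(REQ_OP_DISCARD, &l);
	const char *w = fallback_for(REQ_OP_WRITE_SAME, &l);

	printf("DISCARD    -> %s\n", d ? d : "(no fallback)");
	printf("WRITE SAME -> %s\n", w ? w : "(no fallback)");
	return 0;
}

Compiled with cc sketch.c && ./a.out, the first line prints disable_discard and the second prints (no fallback), matching the new ordering: a failed DISCARD on a queue reporting max_discard_sectors == 0 takes the first branch, while ops the device still advertises fall through untouched.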