@@ -1758,13 +1758,12 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
 	return true;
 }
 
-
-static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
-				  sector_t start, sector_t len, void *data)
+static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
+				      sector_t start, sector_t len, void *data)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && blk_queue_discard(q);
+	return q && !blk_queue_discard(q);
 }
 
 static bool dm_table_supports_discards(struct dm_table *t)
@@ -1772,28 +1771,24 @@ static bool dm_table_supports_discards(struct dm_table *t)
 	struct dm_target *ti;
 	unsigned i;
 
-	/*
-	 * Unless any target used by the table set discards_supported,
-	 * require at least one underlying device to support discards.
-	 * t->devices includes internal dm devices such as mirror logs
-	 * so we need to use iterate_devices here, which targets
-	 * supporting discard selectively must provide.
-	 */
 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
 		ti = dm_table_get_target(t, i);
 
 		if (!ti->num_discard_bios)
-			continue;
-
-		if (ti->discards_supported)
-			return true;
+			return false;
 
-		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
-			return true;
+		/*
+		 * Either the target provides discard support (as implied by setting
+		 * 'discards_supported') or it relies on _all_ data devices having
+		 * discard support.
+		 */
+		if (!ti->discards_supported &&
+		    (!ti->type->iterate_devices ||
+		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
+			return false;
 	}
 
-	return false;
+	return true;
 }
 
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -1806,9 +1801,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 */
 	q->limits = *limits;
 
-	if (!dm_table_supports_discards(t))
+	if (!dm_table_supports_discards(t)) {
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
-	else
+		/* Must also clear discard limits... */
+		q->limits.max_discard_sectors = 0;
+		q->limits.max_hw_discard_sectors = 0;
+		q->limits.discard_granularity = 0;
+		q->limits.discard_alignment = 0;
+		q->limits.discard_misaligned = 0;
+	} else
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 
 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
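
The hunks above flip dm_table_supports_discards() from "any capable device enables discards" to "any incapable target or device disables them": to prove that all devices support discards, the code now iterates with a negated predicate (device_not_discard_capable) and treats any hit as failure. Below is a minimal userspace sketch of that inverted-predicate idiom, assuming simplified stand-in types: struct dev, struct target, iterate_devs() and the sample data in main() are invented for illustration and are not DM's real structures, and the ti->num_discard_bios guard is omitted for brevity.

/*
 * Sketch only, not kernel code: demonstrates checking "all devices are
 * capable" by iterating with a NOT-capable predicate and failing on the
 * first hit, mirroring device_not_discard_capable() above.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dev {
	const char *name;
	bool discard_capable;
};

struct target {
	bool discards_supported;	/* target implements discards itself */
	struct dev *devs;
	size_t ndevs;
};

typedef int (*iterate_fn)(struct dev *dev, void *data);

/* Returns nonzero as soon as fn reports a hit, like iterate_devices(). */
static int iterate_devs(struct target *t, iterate_fn fn, void *data)
{
	for (size_t i = 0; i < t->ndevs; i++)
		if (fn(&t->devs[i], data))
			return 1;
	return 0;
}

/* Negated predicate: "this device is NOT discard capable". */
static int dev_not_discard_capable(struct dev *dev, void *data)
{
	(void)data;
	return !dev->discard_capable;
}

/*
 * All-or-nothing check shaped like the new dm_table_supports_discards():
 * a single incapable target or device disables discards for the table.
 */
static bool table_supports_discards(struct target *targets, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		struct target *t = &targets[i];

		if (!t->discards_supported &&
		    iterate_devs(t, dev_not_discard_capable, NULL))
			return false;
	}
	return true;
}

int main(void)
{
	struct dev good[]  = { { "sda", true }, { "sdb", true } };
	struct dev mixed[] = { { "sdc", true }, { "sdd", false } };
	struct target all_good[] = { { false, good, 2 } };
	struct target one_bad[]  = { { false, good, 2 }, { false, mixed, 2 } };

	printf("all_good: %d\n", table_supports_discards(all_good, 1)); /* 1 */
	printf("one_bad:  %d\n", table_supports_discards(one_bad, 2));  /* 0 */
	return 0;
}

The same reasoning explains the third hunk: once a single incapable device can veto discard support, QUEUE_FLAG_DISCARD alone is not enough and the stale discard limits inherited from the component devices must be zeroed as well.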