@@ -755,6 +755,14 @@ static void dec_pending(struct dm_io *io, int error)
 	}
 }
 
+static void disable_write_same(struct mapped_device *md)
+{
+	struct queue_limits *limits = dm_get_queue_limits(md);
+
+	/* device doesn't really support WRITE SAME, disable it */
+	limits->max_write_same_sectors = 0;
+}
+
 static void clone_endio(struct bio *bio, int error)
 {
 	int r = 0;
@@ -783,6 +791,10 @@ static void clone_endio(struct bio *bio, int error)
 		}
 	}
 
+	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
+		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
+		disable_write_same(md);
+
 	free_tio(md, tio);
 	dec_pending(io, error);
 }
@@ -977,6 +989,10 @@ static void dm_done(struct request *clone, int error, bool mapped)
 			r = rq_end_io(tio->ti, clone, error, &tio->info);
 	}
 
+	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
+		     !clone->q->limits.max_write_same_sectors))
+		disable_write_same(tio->md);
+
 	if (r <= 0)
 		/* The target wants to complete the I/O */
 		dm_end_request(clone, r);
@@ -1110,6 +1126,46 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
 }
 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
+/*
+ * A target may call dm_accept_partial_bio only from the map routine. It is
+ * allowed for all bio types except REQ_FLUSH.
+ *
+ * dm_accept_partial_bio informs the dm that the target only wants to process
+ * additional n_sectors sectors of the bio and the rest of the data should be
+ * sent in a next bio.
+ *
+ * A diagram that explains the arithmetics:
+ * +--------------------+---------------+-------+
+ * |         1          |       2       |   3   |
+ * +--------------------+---------------+-------+
+ *
+ * <-------------- *tio->len_ptr --------------->
+ *                      <------- bi_size ------->
+ *                      <-- n_sectors -->
+ *
+ * Region 1 was already iterated over with bio_advance or similar function.
+ *	(it may be empty if the target doesn't use bio_advance)
+ * Region 2 is the remaining bio size that the target wants to process.
+ *	(it may be empty if region 1 is non-empty, although there is no reason
+ *	 to make it empty)
+ * The target requires that region 3 is to be sent in the next bio.
+ *
+ * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
+ * the partially processed part (the sum of regions 1+2) must be the same for all
+ * copies of the bio.
+ */
+void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
+{
+	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+	BUG_ON(bio->bi_rw & REQ_FLUSH);
+	BUG_ON(bi_size > *tio->len_ptr);
+	BUG_ON(n_sectors > bi_size);
+	*tio->len_ptr -= bi_size - n_sectors;
+	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
+}
+EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+
 static void __map_bio(struct dm_target_io *tio)
 {
 	int r;
@@ -1152,10 +1208,10 @@ struct clone_info {
 	struct bio *bio;
 	struct dm_io *io;
 	sector_t sector;
-	sector_t sector_count;
+	unsigned sector_count;
 };
 
-static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
+static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
 {
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_iter.bi_size = to_bytes(len);
@@ -1200,11 +1256,13 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
 
 static void __clone_and_map_simple_bio(struct clone_info *ci,
 				       struct dm_target *ti,
-				       unsigned target_bio_nr, sector_t len)
+				       unsigned target_bio_nr, unsigned *len)
 {
 	struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
 	struct bio *clone = &tio->clone;
 
+	tio->len_ptr = len;
+
 	/*
 	 * Discard requests require the bio's inline iovecs be initialized.
 	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
@@ -1212,13 +1270,13 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
 	 */
 	__bio_clone_fast(clone, ci->bio);
 	if (len)
-		bio_setup_sector(clone, ci->sector, len);
+		bio_setup_sector(clone, ci->sector, *len);
 
 	__map_bio(tio);
 }
 
 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
-				  unsigned num_bios, sector_t len)
+				  unsigned num_bios, unsigned *len)
 {
 	unsigned target_bio_nr;
 
@@ -1233,13 +1291,13 @@ static int __send_empty_flush(struct clone_info *ci)
 
 	BUG_ON(bio_has_data(ci->bio));
 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
-		__send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
+		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
 
 	return 0;
 }
 
 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
-				     sector_t sector, unsigned len)
+				     sector_t sector, unsigned *len)
 {
 	struct bio *bio = ci->bio;
 	struct dm_target_io *tio;
@@ -1254,7 +1312,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
 
 	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
 		tio = alloc_tio(ci, ti, 0, target_bio_nr);
-		clone_bio(tio, bio, sector, len);
+		tio->len_ptr = len;
+		clone_bio(tio, bio, sector, *len);
 		__map_bio(tio);
 	}
 }
@@ -1283,7 +1342,7 @@ static int __send_changing_extent_only(struct clone_info *ci,
 				       is_split_required_fn is_split_required)
 {
 	struct dm_target *ti;
-	sector_t len;
+	unsigned len;
 	unsigned num_bios;
 
 	do {
@@ -1302,11 +1361,11 @@ static int __send_changing_extent_only(struct clone_info *ci,
 			return -EOPNOTSUPP;
 
 		if (is_split_required && !is_split_required(ti))
-			len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+			len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
 		else
-			len = min(ci->sector_count, max_io_len(ci->sector, ti));
+			len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
 
-		__send_duplicate_bios(ci, ti, num_bios, len);
+		__send_duplicate_bios(ci, ti, num_bios, &len);
 
 		ci->sector += len;
 	} while (ci->sector_count -= len);
@@ -1345,7 +1404,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
 
 	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
 
-	__clone_and_map_data_bio(ci, ti, ci->sector, len);
+	__clone_and_map_data_bio(ci, ti, ci->sector, &len);
 
 	ci->sector += len;
 	ci->sector_count -= len;
@@ -1439,7 +1498,6 @@ static int dm_merge_bvec(struct request_queue *q,
 	 * just one page.
 	 */
 	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
-
 		max_size = 0;
 
 out:
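
For illustration, a minimal sketch (not part of the patch) of how a target's map routine might use the dm_accept_partial_bio() interface added above. The target name example_map, the EXAMPLE_MAX_SECTORS limit and the ti->private layout are hypothetical; only dm_accept_partial_bio(), bio_sectors(), DM_MAPIO_REMAPPED, the REQ_FLUSH restriction and the bi_rw/bi_bdev fields of this kernel era come from the patch and the existing device-mapper API.

/*
 * Hypothetical example only: a toy map routine that processes at most
 * EXAMPLE_MAX_SECTORS sectors of each data bio and defers the rest.
 */
#include <linux/device-mapper.h>

#define EXAMPLE_MAX_SECTORS 8	/* arbitrary per-bio limit for the sketch */

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct block_device *bdev = ti->private;	/* assumed set up in .ctr */

	/*
	 * dm_accept_partial_bio() must not be called on REQ_FLUSH bios
	 * (it BUG()s on them), so only data bios are trimmed here.
	 */
	if (!(bio->bi_rw & REQ_FLUSH) && bio_sectors(bio) > EXAMPLE_MAX_SECTORS)
		/*
		 * Accept only the first EXAMPLE_MAX_SECTORS sectors; dm
		 * sends the remaining sectors in a follow-up bio that goes
		 * through this map routine again.
		 */
		dm_accept_partial_bio(bio, EXAMPLE_MAX_SECTORS);

	/*
	 * Remap to the underlying device (a real target would also adjust
	 * bio->bi_iter.bi_sector here).
	 */
	bio->bi_bdev = bdev;
	return DM_MAPIO_REMAPPED;
}

Because dm resubmits the unaccepted tail as a new bio, the remainder passes through the map routine again, so the target never has to split or clone bios itself.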