@@ -905,6 +905,33 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
 }
 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
+static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
+				 void __pmem **kaddr, pfn_t *pfn, long size)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+	struct dm_table *map;
+	struct dm_target *ti;
+	int srcu_idx;
+	long len, ret = -EIO;
+
+	map = dm_get_live_table(md, &srcu_idx);
+	if (!map)
+		goto out;
+
+	ti = dm_table_find_target(map, sector);
+	if (!dm_target_is_valid(ti))
+		goto out;
+
+	len = max_io_len(sector, ti) << SECTOR_SHIFT;
+	size = min(len, size);
+
+	if (ti->type->direct_access)
+		ret = ti->type->direct_access(ti, sector, kaddr, pfn, size);
+out:
+	dm_put_live_table(md, srcu_idx);
+	return min(ret, size);
+}
+
 /*
  * A target may call dm_accept_partial_bio only from the map routine. It is
  * allowed for all bio types except REQ_PREFLUSH.
@@ -1548,7 +1575,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 
 	if (md->bs) {
 		/* The md already has necessary mempools. */
-		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
+		if (dm_table_bio_based(t)) {
 			/*
 			 * Reload bioset because front_pad may have changed
 			 * because a different table was loaded.
@@ -1744,8 +1771,9 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 {
 	int r;
+	unsigned type = dm_get_md_type(md);
 
-	switch (dm_get_md_type(md)) {
+	switch (type) {
 	case DM_TYPE_REQUEST_BASED:
 		r = dm_old_init_request_queue(md);
 		if (r) {
@@ -1761,6 +1789,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 		}
 		break;
 	case DM_TYPE_BIO_BASED:
+	case DM_TYPE_DAX_BIO_BASED:
 		dm_init_normal_md_queue(md);
 		blk_queue_make_request(md->queue, dm_make_request);
 		/*
@@ -1769,6 +1798,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 		 */
 		bioset_free(md->queue->bio_split);
 		md->queue->bio_split = NULL;
+
+		if (type == DM_TYPE_DAX_BIO_BASED)
+			queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
 		break;
 	}
 
@@ -2465,6 +2497,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
 
 	switch (type) {
 	case DM_TYPE_BIO_BASED:
+	case DM_TYPE_DAX_BIO_BASED:
 		cachep = _io_cache;
 		pool_size = dm_get_reserved_bio_based_ios();
 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
@@ -2691,6 +2724,7 @@ static const struct block_device_operations dm_blk_dops = {
 	.open = dm_blk_open,
 	.release = dm_blk_close,
 	.ioctl = dm_blk_ioctl,
+	.direct_access = dm_blk_direct_access,
 	.getgeo = dm_blk_getgeo,
 	.pr_ops = &dm_pr_ops,
 	.owner = THIS_MODULE
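
Note: with this hook in place, a target opts into DAX by supplying a direct_access method in its target_type; dm_blk_direct_access() clamps size to the target's boundary via max_io_len() before the call, and min()'s the return value again so a target can never report more contiguous DAX space than the table layout allows. As a point of reference, the sketch below is modeled on the dm-linear implementation added elsewhere in this series ("dm linear: add DAX support"); struct linear_c and linear_map_sector() are dm-linear internals assumed here, not part of this patch.

	/*
	 * Sketch of a target-side direct_access implementation, modeled on
	 * dm-linear: remap the sector, then forward to the underlying
	 * block device via bdev_direct_access(). struct linear_c and
	 * linear_map_sector() are dm-linear internals (assumed).
	 */
	static long linear_direct_access(struct dm_target *ti, sector_t sector,
					 void __pmem **kaddr, pfn_t *pfn, long size)
	{
		struct linear_c *lc = ti->private;
		struct block_device *bdev = lc->dev->bdev;
		struct blk_dax_ctl dax = {
			.sector = linear_map_sector(ti, sector),
			.size = size,
		};
		long ret;

		/* Forward to the underlying device's DAX path. */
		ret = bdev_direct_access(bdev, &dax);
		*kaddr = dax.addr;
		*pfn = dax.pfn;

		return ret;
	}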