@@ -24,6 +24,7 @@
 #include <linux/ktime.h>
 #include <linux/elevator.h> /* for rq_end_sector() */
 #include <linux/blk-mq.h>
+#include <linux/pr.h>
 
 #include <trace/events/block.h>
 
@@ -555,18 +556,16 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return dm_get_geometry(md, geo);
 }
 
-static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
-			unsigned int cmd, unsigned long arg)
+static int dm_get_live_table_for_ioctl(struct mapped_device *md,
+		struct dm_target **tgt, struct block_device **bdev,
+		fmode_t *mode, int *srcu_idx)
 {
-	struct mapped_device *md = bdev->bd_disk->private_data;
-	int srcu_idx;
 	struct dm_table *map;
-	struct dm_target *tgt;
-	int r = -ENOTTY;
+	int r;
 
 retry:
-	map = dm_get_live_table(md, &srcu_idx);
-
+	r = -ENOTTY;
+	map = dm_get_live_table(md, srcu_idx);
 	if (!map || !dm_table_get_size(map))
 		goto out;
 
@@ -574,8 +573,9 @@ retry:
 	if (dm_table_get_num_targets(map) != 1)
 		goto out;
 
-	tgt = dm_table_get_target(map, 0);
-	if (!tgt->type->ioctl)
+	*tgt = dm_table_get_target(map, 0);
+
+	if (!(*tgt)->type->prepare_ioctl)
 		goto out;
 
 	if (dm_suspended_md(md)) {
@@ -583,16 +583,46 @@ retry:
 		goto out;
 	}
 
-	r = tgt->type->ioctl(tgt, cmd, arg);
+	r = (*tgt)->type->prepare_ioctl(*tgt, bdev, mode);
+	if (r < 0)
+		goto out;
 
-out:
-	dm_put_live_table(md, srcu_idx);
+	return r;
 
+out:
+	dm_put_live_table(md, *srcu_idx);
 	if (r == -ENOTCONN) {
 		msleep(10);
 		goto retry;
 	}
-
+	return r;
+}
+
+static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
+			unsigned int cmd, unsigned long arg)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+	struct dm_target *tgt;
+	int srcu_idx, r;
+
+	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+	if (r < 0)
+		return r;
+
+	if (r > 0) {
+		/*
+		 * Target determined this ioctl is being issued against
+		 * a logical partition of the parent bdev; so extra
+		 * validation is needed.
+		 */
+		r = scsi_verify_blk_ioctl(NULL, cmd);
+		if (r)
+			goto out;
+	}
+
+	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+out:
+	dm_put_live_table(md, srcu_idx);
 	return r;
 }
 
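Note: with this refactor a target no longer issues the ioctl itself; its prepare_ioctl hook only resolves the underlying block device, returning a value > 0 when the mapping is partial and the caller must apply the extra partition validation shown above. As a sketch of that contract, a simple single-device target such as dm-linear would implement the hook roughly as follows (modeled on the companion dm-linear change in this series; the linear_c field names are that target's own):

	static int linear_prepare_ioctl(struct dm_target *ti,
			struct block_device **bdev, fmode_t *mode)
	{
		struct linear_c *lc = (struct linear_c *) ti->private;
		struct dm_dev *dev = lc->dev;

		/* Hand the underlying device back to dm_blk_ioctl(). */
		*bdev = dev->bdev;

		/*
		 * Only pass ioctls through unchanged if the target maps the
		 * whole device; otherwise flag it for extra validation.
		 */
		if (lc->start ||
		    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
			return 1;
		return 0;
	}
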
@@ -1734,8 +1764,6 @@ static void dm_make_request(struct request_queue *q, struct bio *bio)
 
 	map = dm_get_live_table(md, &srcu_idx);
 
-	blk_queue_split(q, &bio, q->bio_split);
-
 	generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
 
 	/* if we're suspended, we have to queue this io for later */
@@ -2198,6 +2226,13 @@ static void dm_init_md_queue(struct mapped_device *md)
 	 * This queue is new, so no concurrency on the queue_flags.
 	 */
 	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+
+	/*
+	 * Initialize data that will only be used by a non-blk-mq DM queue
+	 * - must do so here (in alloc_dev callchain) before queue is used
+	 */
+	md->queue->queuedata = md;
+	md->queue->backing_dev_info.congested_data = md;
 }
 
 static void dm_init_old_md_queue(struct mapped_device *md)
@@ -2208,10 +2243,7 @@ static void dm_init_old_md_queue(struct mapped_device *md)
 	/*
 	 * Initialize aspects of queue that aren't relevant for blk-mq
 	 */
-	md->queue->queuedata = md;
 	md->queue->backing_dev_info.congested_fn = dm_any_congested;
-	md->queue->backing_dev_info.congested_data = md;
-
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 }
 
@@ -2221,10 +2253,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
 		destroy_workqueue(md->wq);
 	if (md->kworker_task)
 		kthread_stop(md->kworker_task);
-	if (md->io_pool)
-		mempool_destroy(md->io_pool);
-	if (md->rq_pool)
-		mempool_destroy(md->rq_pool);
+	mempool_destroy(md->io_pool);
+	mempool_destroy(md->rq_pool);
 	if (md->bs)
 		bioset_free(md->bs);
 
@@ -2759,6 +2789,12 @@ int dm_setup_md_queue(struct mapped_device *md)
 	case DM_TYPE_BIO_BASED:
 		dm_init_old_md_queue(md);
 		blk_queue_make_request(md->queue, dm_make_request);
+		/*
+		 * DM handles splitting bios as needed.  Free the bio_split bioset
+		 * since it won't be used (saves 1 process per bio-based DM device).
+		 */
+		bioset_free(md->queue->bio_split);
+		md->queue->bio_split = NULL;
 		break;
 	}
 
@@ -3505,11 +3541,8 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
 	if (!pools)
 		return;
 
-	if (pools->io_pool)
-		mempool_destroy(pools->io_pool);
-
-	if (pools->rq_pool)
-		mempool_destroy(pools->rq_pool);
+	mempool_destroy(pools->io_pool);
+	mempool_destroy(pools->rq_pool);
 
 	if (pools->bs)
 		bioset_free(pools->bs);
@@ -3517,11 +3550,133 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
 	kfree(pools);
 }
 
+static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
+		u32 flags)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+	const struct pr_ops *ops;
+	struct dm_target *tgt;
+	fmode_t mode;
+	int srcu_idx, r;
+
+	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+	if (r < 0)
+		return r;
+
+	ops = bdev->bd_disk->fops->pr_ops;
+	if (ops && ops->pr_register)
+		r = ops->pr_register(bdev, old_key, new_key, flags);
+	else
+		r = -EOPNOTSUPP;
+
+	dm_put_live_table(md, srcu_idx);
+	return r;
+}
+
+static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
+		u32 flags)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+	const struct pr_ops *ops;
+	struct dm_target *tgt;
+	fmode_t mode;
+	int srcu_idx, r;
+
+	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+	if (r < 0)
+		return r;
+
+	ops = bdev->bd_disk->fops->pr_ops;
+	if (ops && ops->pr_reserve)
+		r = ops->pr_reserve(bdev, key, type, flags);
+	else
+		r = -EOPNOTSUPP;
+
+	dm_put_live_table(md, srcu_idx);
+	return r;
+}
+
+static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+	const struct pr_ops *ops;
+	struct dm_target *tgt;
+	fmode_t mode;
+	int srcu_idx, r;
+
+	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+	if (r < 0)
+		return r;
+
+	ops = bdev->bd_disk->fops->pr_ops;
+	if (ops && ops->pr_release)
+		r = ops->pr_release(bdev, key, type);
+	else
+		r = -EOPNOTSUPP;
+
+	dm_put_live_table(md, srcu_idx);
+	return r;
+}
+
+static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
+		enum pr_type type, bool abort)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+	const struct pr_ops *ops;
+	struct dm_target *tgt;
+	fmode_t mode;
+	int srcu_idx, r;
+
+	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+	if (r < 0)
+		return r;
+
+	ops = bdev->bd_disk->fops->pr_ops;
+	if (ops && ops->pr_preempt)
+		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
+	else
+		r = -EOPNOTSUPP;
+
+	dm_put_live_table(md, srcu_idx);
+	return r;
+}
+
+static int dm_pr_clear(struct block_device *bdev, u64 key)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+	const struct pr_ops *ops;
+	struct dm_target *tgt;
+	fmode_t mode;
+	int srcu_idx, r;
+
+	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+	if (r < 0)
+		return r;
+
+	ops = bdev->bd_disk->fops->pr_ops;
+	if (ops && ops->pr_clear)
+		r = ops->pr_clear(bdev, key);
+	else
+		r = -EOPNOTSUPP;
+
+	dm_put_live_table(md, srcu_idx);
+	return r;
+}
+
+static const struct pr_ops dm_pr_ops = {
+	.pr_register	= dm_pr_register,
+	.pr_reserve	= dm_pr_reserve,
+	.pr_release	= dm_pr_release,
+	.pr_preempt	= dm_pr_preempt,
+	.pr_clear	= dm_pr_clear,
+};
+
 static const struct block_device_operations dm_blk_dops = {
 	.open = dm_blk_open,
 	.release = dm_blk_close,
 	.ioctl = dm_blk_ioctl,
 	.getgeo = dm_blk_getgeo,
+	.pr_ops = &dm_pr_ops,
 	.owner = THIS_MODULE
 };
 
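Note: with .pr_ops wired up, the standard persistent-reservation ioctls issued against a DM device node are passed through to the (single) underlying device, and fail with -EOPNOTSUPP when that device provides no pr_ops of its own. A minimal, hypothetical userspace sketch using the PR API from <linux/pr.h>; the device path and reservation key below are illustrative only:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/pr.h>

	int main(void)
	{
		/* Illustrative reservation key; pick one per initiator. */
		struct pr_registration reg = {
			.old_key = 0,
			.new_key = 0x123abc,
			.flags = 0,
		};
		int fd = open("/dev/dm-0", O_RDWR);	/* illustrative node */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Travels through dm_pr_register() to ops->pr_register(). */
		if (ioctl(fd, IOC_PR_REGISTER, &reg) < 0)
			perror("IOC_PR_REGISTER");
		close(fd);
		return 0;
	}
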