@@ -249,7 +249,7 @@ static void scm_request_requeue(struct scm_request *scmrq)
 static void scm_request_finish(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
-	int *error;
+	blk_status_t *error;
 	int i;
 
 	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
@@ -415,7 +415,7 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
 
 static void scm_blk_request_done(struct request *req)
 {
-	int *error = blk_mq_rq_to_pdu(req);
+	blk_status_t *error = blk_mq_rq_to_pdu(req);
 
 	blk_mq_end_request(req, *error);
 }
@@ -450,7 +450,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 	atomic_set(&bdev->queued_reqs, 0);
 
 	bdev->tag_set.ops = &scm_mq_ops;
-	bdev->tag_set.cmd_size = sizeof(int);
+	bdev->tag_set.cmd_size = sizeof(blk_status_t);
 	bdev->tag_set.nr_hw_queues = nr_requests;
 	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
 	bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
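
For context on why all three sites change together: blk-mq reserves cmd_size bytes of per-request private data (the PDU) behind each struct request, and blk_mq_rq_to_pdu() returns a pointer to that area. The driver stores the completion status into the PDU on the interrupt path and reads it back in the completion handler, so the stored type and cmd_size must stay in sync. A minimal sketch of that pattern, assuming a hypothetical driver (my_irq_handler, my_request_done, and my_setup are illustrative names, not from this patch):

#include <linux/blk-mq.h>

/* Hypothetical driver illustrating the per-request PDU pattern
 * this patch converts; not part of the patch itself. */

/* Interrupt path: stash the status in the request's PDU and let
 * the block layer schedule the completion handler. */
static void my_irq_handler(struct request *req, blk_status_t status)
{
	blk_status_t *error = blk_mq_rq_to_pdu(req);

	*error = status;
	blk_mq_complete_request(req);
}

/* Completion handler: read the status back out of the PDU and
 * finish the request with it. */
static void my_request_done(struct request *req)
{
	blk_status_t *error = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, *error);
}

/* Setup: cmd_size must match the type stored in the PDU. */
static int my_setup(struct blk_mq_tag_set *set)
{
	set->cmd_size = sizeof(blk_status_t);
	return blk_mq_alloc_tag_set(set);
}

Keeping cmd_size as sizeof(int) while storing a blk_status_t would still leave enough room, but matching the PDU size to the stored type keeps the allocation self-documenting, hence the sizeof(blk_status_t) change in scm_blk_dev_setup().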