@@ -571,13 +571,6 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
 	.max_phys_sect		= 64,
 };
 
-static void nvme_nvm_end_user_vio(struct request *rq, int error)
-{
-	struct completion *waiting = rq->end_io_data;
-
-	complete(waiting);
-}
-
 static int nvme_nvm_submit_user_cmd(struct request_queue *q,
 				struct nvme_ns *ns,
 				struct nvme_nvm_command *vcmd,
@@ -608,7 +601,6 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
 	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
 
 	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
-	rq->end_io_data = &wait;
 
 	if (ppa_buf && ppa_len) {
 		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
@@ -662,9 +654,7 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
 	}
 
 submit:
-	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_user_vio);
-
-	wait_for_completion_io(&wait);
+	blk_execute_rq(q, NULL, rq, 0);
 
 	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
 		ret = -EINTR;
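
For context, a minimal sketch (not part of the patch; my_sync_end_io() and my_submit_sync() are invented names) of the open-coded synchronous submission the removed lines implemented: queue the request with blk_execute_rq_nowait() and block on an on-stack completion signalled from the end_io callback. blk_execute_rq() performs effectively the same wait internally, which is why the private nvme_nvm_end_user_vio() helper and the end_io_data bookkeeping can be dropped.

/*
 * Sketch only: illustrates the pattern removed by this patch.
 * my_sync_end_io()/my_submit_sync() are hypothetical names, not kernel APIs.
 */
#include <linux/blkdev.h>
#include <linux/completion.h>

static void my_sync_end_io(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	complete(waiting);		/* wake up the submitting thread */
}

static void my_submit_sync(struct request_queue *q, struct request *rq)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, NULL, rq, 0, my_sync_end_io);
	wait_for_completion_io(&wait);

	/* Equivalent one-liner, which the patch switches to: */
	/* blk_execute_rq(q, NULL, rq, 0); */
}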