@@ -564,8 +564,10 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
 {
 	struct mmc_blk_ioc_data *idata;
 	struct mmc_blk_data *md;
+	struct mmc_queue *mq;
 	struct mmc_card *card;
 	int err = 0, ioc_err = 0;
+	struct request *req;
 
 	/*
 	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
@@ -591,17 +593,18 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
 		goto cmd_done;
 	}
 
-	mmc_get_card(card);
-
-	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
-
-	/* Always switch back to main area after RPMB access */
-	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
-		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
-
-	mmc_put_card(card);
-
+	/*
+	 * Dispatch the ioctl() into the block request queue.
+	 */
+	mq = &md->queue;
+	req = blk_get_request(mq->queue,
+		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+		__GFP_RECLAIM);
+	req_to_mmc_queue_req(req)->idata = idata;
+	blk_execute_rq(mq->queue, NULL, req, 0);
+	ioc_err = req_to_mmc_queue_req(req)->ioc_result;
 	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+	blk_put_request(req);
 
 cmd_done:
 	mmc_blk_put(md);
@@ -611,6 +614,31 @@ cmd_err:
 	return ioc_err ? ioc_err : err;
 }
 
+/*
+ * The ioctl commands come back from the block layer after it queued it and
+ * processed it with all other requests and then they get issued in this
+ * function.
+ */
+static void mmc_blk_ioctl_cmd_issue(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_queue_req *mq_rq;
+	struct mmc_blk_ioc_data *idata;
+	struct mmc_card *card = mq->card;
+	struct mmc_blk_data *md = mq->blkdata;
+	int ioc_err;
+
+	mq_rq = req_to_mmc_queue_req(req);
+	idata = mq_rq->idata;
+	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
+	mq_rq->ioc_result = ioc_err;
+
+	/* Always switch back to main area after RPMB access */
+	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+
+	blk_end_request_all(req, ioc_err);
+}
+
 static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
 				   struct mmc_ioc_multi_cmd __user *user)
 {
@@ -1854,23 +1882,54 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		goto out;
 	}
 
-	if (req && req_op(req) == REQ_OP_DISCARD) {
-		/* complete ongoing async transfer before issuing discard */
-		if (mq->qcnt)
-			mmc_blk_issue_rw_rq(mq, NULL);
-		mmc_blk_issue_discard_rq(mq, req);
-	} else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
-		/* complete ongoing async transfer before issuing secure erase*/
-		if (mq->qcnt)
-			mmc_blk_issue_rw_rq(mq, NULL);
-		mmc_blk_issue_secdiscard_rq(mq, req);
-	} else if (req && req_op(req) == REQ_OP_FLUSH) {
-		/* complete ongoing async transfer before issuing flush */
-		if (mq->qcnt)
-			mmc_blk_issue_rw_rq(mq, NULL);
-		mmc_blk_issue_flush(mq, req);
+	if (req) {
+		switch (req_op(req)) {
+		case REQ_OP_DRV_IN:
+		case REQ_OP_DRV_OUT:
+			/*
+			 * Complete ongoing async transfer before issuing
+			 * ioctl()s
+			 */
+			if (mq->qcnt)
+				mmc_blk_issue_rw_rq(mq, NULL);
+			mmc_blk_ioctl_cmd_issue(mq, req);
+			break;
+		case REQ_OP_DISCARD:
+			/*
+			 * Complete ongoing async transfer before issuing
+			 * discard.
+			 */
+			if (mq->qcnt)
+				mmc_blk_issue_rw_rq(mq, NULL);
+			mmc_blk_issue_discard_rq(mq, req);
+			break;
+		case REQ_OP_SECURE_ERASE:
+			/*
+			 * Complete ongoing async transfer before issuing
+			 * secure erase.
+			 */
+			if (mq->qcnt)
+				mmc_blk_issue_rw_rq(mq, NULL);
+			mmc_blk_issue_secdiscard_rq(mq, req);
+			break;
+		case REQ_OP_FLUSH:
+			/*
+			 * Complete ongoing async transfer before issuing
+			 * flush.
+			 */
+			if (mq->qcnt)
+				mmc_blk_issue_rw_rq(mq, NULL);
+			mmc_blk_issue_flush(mq, req);
+			break;
+		default:
+			/* Normal request, just issue it */
+			mmc_blk_issue_rw_rq(mq, req);
+			card->host->context_info.is_waiting_last_req = false;
+			break;
+		};
 	} else {
-		mmc_blk_issue_rw_rq(mq, req);
+		/* No request, flushing the pipeline with NULL */
+		mmc_blk_issue_rw_rq(mq, NULL);
 		card->host->context_info.is_waiting_last_req = false;
 	}
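
For reference, a small user-space exerciser for the path rerouted above: an MMC_IOC_CMD ioctl() on the block device now becomes a REQ_OP_DRV_IN or REQ_OP_DRV_OUT request, is queued and run with blk_execute_rq(), and is finally executed by mmc_blk_ioctl_cmd_issue(). This is only a sketch, not part of the patch; the device node, the RCA value and the response-flag defines (copied here the way mmc-utils copies them from the kernel headers) are assumptions for illustration, and the caller still needs CAP_SYS_RAWIO.

/*
 * Minimal sketch: send CMD13 (SEND_STATUS) through MMC_IOC_CMD.
 * With the patch applied, write_flag == 0 makes this ioctl travel
 * through the block queue as a REQ_OP_DRV_IN request before
 * __mmc_blk_ioctl_cmd() actually talks to the card.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/major.h>
#include <linux/mmc/ioctl.h>

/* Response/command flags mirrored from the kernel's mmc headers */
#define MMC_RSP_PRESENT	(1 << 0)
#define MMC_RSP_CRC	(1 << 2)
#define MMC_RSP_OPCODE	(1 << 4)
#define MMC_RSP_R1	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_CMD_AC	(0 << 5)

#define MMC_SEND_STATUS	13	/* CMD13 */

int main(void)
{
	struct mmc_ioc_cmd ic;
	int fd = open("/dev/mmcblk0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&ic, 0, sizeof(ic));
	ic.opcode = MMC_SEND_STATUS;
	ic.arg = 1 << 16;	/* assumed RCA of 1 in bits [31:16] */
	ic.flags = MMC_RSP_R1 | MMC_CMD_AC;
	ic.write_flag = 0;	/* read direction -> REQ_OP_DRV_IN */

	if (ioctl(fd, MMC_IOC_CMD, &ic) < 0)
		perror("MMC_IOC_CMD");
	else
		printf("card status: 0x%08x\n", ic.response[0]);

	close(fd);
	return 0;
}

A write-direction command would simply set write_flag to 1, which the first hunk above maps to REQ_OP_DRV_OUT when the request is allocated.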