@@ -40,35 +40,6 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	return BLKPREP_OK;
 }
 
-struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
-					 struct request *req)
-{
-	struct mmc_queue_req *mqrq;
-	int i = ffz(mq->qslots);
-
-	if (i >= mq->qdepth)
-		return NULL;
-
-	mqrq = &mq->mqrq[i];
-	WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
-		test_bit(mqrq->task_id, &mq->qslots));
-	mqrq->req = req;
-	mq->qcnt += 1;
-	__set_bit(mqrq->task_id, &mq->qslots);
-
-	return mqrq;
-}
-
-void mmc_queue_req_free(struct mmc_queue *mq,
-			struct mmc_queue_req *mqrq)
-{
-	WARN_ON(!mqrq->req || mq->qcnt < 1 ||
-		!test_bit(mqrq->task_id, &mq->qslots));
-	mqrq->req = NULL;
-	mq->qcnt -= 1;
-	__clear_bit(mqrq->task_id, &mq->qslots);
-}
-
 static int mmc_queue_thread(void *d)
 {
 	struct mmc_queue *mq = d;
@@ -149,11 +120,11 @@ static void mmc_request_fn(struct request_queue *q)
 		wake_up_process(mq->thread);
 }
 
-static struct scatterlist *mmc_alloc_sg(int sg_len)
+static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
 {
 	struct scatterlist *sg;
 
-	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
+	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
 	if (sg)
 		sg_init_table(sg, sg_len);
 
@@ -179,80 +150,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
-static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
-{
-	kfree(mqrq->bounce_sg);
-	mqrq->bounce_sg = NULL;
-
-	kfree(mqrq->sg);
-	mqrq->sg = NULL;
-
-	kfree(mqrq->bounce_buf);
-	mqrq->bounce_buf = NULL;
-}
-
-static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
-{
-	int i;
-
-	for (i = 0; i < qdepth; i++)
-		mmc_queue_req_free_bufs(&mqrq[i]);
-}
-
-static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
-{
-	mmc_queue_reqs_free_bufs(mqrq, qdepth);
-	kfree(mqrq);
-}
-
-static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
-{
-	struct mmc_queue_req *mqrq;
-	int i;
-
-	mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
-	if (mqrq) {
-		for (i = 0; i < qdepth; i++)
-			mqrq[i].task_id = i;
-	}
-
-	return mqrq;
-}
-
-static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
-				       unsigned int bouncesz)
-{
-	int i;
-
-	for (i = 0; i < qdepth; i++) {
-		mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mqrq[i].bounce_buf)
-			return -ENOMEM;
-
-		mqrq[i].sg = mmc_alloc_sg(1);
-		if (!mqrq[i].sg)
-			return -ENOMEM;
-
-		mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
-		if (!mqrq[i].bounce_sg)
-			return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
-				   unsigned int bouncesz)
-{
-	int ret;
-
-	ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
-	if (ret)
-		mmc_queue_reqs_free_bufs(mqrq, qdepth);
-
-	return !ret;
-}
-
 static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
 {
 	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
@@ -273,71 +170,61 @@ static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
 	return bouncesz;
 }
 
-static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
-			       int max_segs)
+/**
+ * mmc_init_request() - initialize the MMC-specific per-request data
+ * @q: the request queue
+ * @req: the request
+ * @gfp: memory allocation policy
+ */
+static int mmc_init_request(struct request_queue *q, struct request *req,
+			    gfp_t gfp)
 {
-	int i;
+	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
+	struct mmc_queue *mq = q->queuedata;
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host = card->host;
 
-	for (i = 0; i < qdepth; i++) {
-		mqrq[i].sg = mmc_alloc_sg(max_segs);
-		if (!mqrq[i].sg)
+	mq_rq->req = req;
+
+	if (card->bouncesz) {
+		mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
+		if (!mq_rq->bounce_buf)
+			return -ENOMEM;
+		if (card->bouncesz > 512) {
+			mq_rq->sg = mmc_alloc_sg(1, gfp);
+			if (!mq_rq->sg)
+				return -ENOMEM;
+			mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
+							gfp);
+			if (!mq_rq->bounce_sg)
+				return -ENOMEM;
+		}
+	} else {
+		mq_rq->bounce_buf = NULL;
+		mq_rq->bounce_sg = NULL;
+		mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+		if (!mq_rq->sg)
 			return -ENOMEM;
 	}
 
 	return 0;
 }
 
-void mmc_queue_free_shared_queue(struct mmc_card *card)
-{
-	if (card->mqrq) {
-		mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
-		card->mqrq = NULL;
-	}
-}
-
-static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
+static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
-	struct mmc_host *host = card->host;
-	struct mmc_queue_req *mqrq;
-	unsigned int bouncesz;
-	int ret = 0;
-
-	if (card->mqrq)
-		return -EINVAL;
+	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 
-	mqrq = mmc_queue_alloc_mqrqs(qdepth);
-	if (!mqrq)
-		return -ENOMEM;
-
-	card->mqrq = mqrq;
-	card->qdepth = qdepth;
+	/* It is OK to kfree(NULL) so this will be smooth */
+	kfree(mq_rq->bounce_sg);
+	mq_rq->bounce_sg = NULL;
 
-	bouncesz = mmc_queue_calc_bouncesz(host);
+	kfree(mq_rq->bounce_buf);
+	mq_rq->bounce_buf = NULL;
 
-	if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
-		bouncesz = 0;
-		pr_warn("%s: unable to allocate bounce buffers\n",
-			mmc_card_name(card));
-	}
+	kfree(mq_rq->sg);
+	mq_rq->sg = NULL;
 
-	card->bouncesz = bouncesz;
-
-	if (!bouncesz) {
-		ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
-		if (ret)
-			goto out_err;
-	}
-
-	return ret;
-
-out_err:
-	mmc_queue_free_shared_queue(card);
-	return ret;
-}
-
-int mmc_queue_alloc_shared_queue(struct mmc_card *card)
-{
-	return __mmc_queue_alloc_shared_queue(card, 2);
+	mq_rq->req = NULL;
 }
 
 /**
@@ -360,13 +247,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
 	mq->card = card;
-	mq->queue = blk_init_queue(mmc_request_fn, lock);
+	mq->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!mq->queue)
 		return -ENOMEM;
-
-	mq->mqrq = card->mqrq;
-	mq->qdepth = card->qdepth;
+	mq->queue->queue_lock = lock;
+	mq->queue->request_fn = mmc_request_fn;
+	mq->queue->init_rq_fn = mmc_init_request;
+	mq->queue->exit_rq_fn = mmc_exit_request;
+	mq->queue->cmd_size = sizeof(struct mmc_queue_req);
 	mq->queue->queuedata = mq;
+	mq->qcnt = 0;
+	ret = blk_init_allocated_queue(mq->queue);
+	if (ret) {
+		blk_cleanup_queue(mq->queue);
+		return ret;
+	}
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
@@ -374,6 +269,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
+	card->bouncesz = mmc_queue_calc_bouncesz(host);
 	if (card->bouncesz) {
 		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
@@ -400,7 +296,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	return 0;
 
 cleanup_queue:
-	mq->mqrq = NULL;
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -422,7 +317,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	mq->mqrq = NULL;
 	mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);