@@ -252,6 +252,27 @@ static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
 	return ret;
 }
 
+static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
+{
+	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
+
+	kfree(mqrq_cur->bounce_sg);
+	mqrq_cur->bounce_sg = NULL;
+	kfree(mqrq_prev->bounce_sg);
+	mqrq_prev->bounce_sg = NULL;
+
+	kfree(mqrq_cur->sg);
+	mqrq_cur->sg = NULL;
+	kfree(mqrq_cur->bounce_buf);
+	mqrq_cur->bounce_buf = NULL;
+
+	kfree(mqrq_prev->sg);
+	mqrq_prev->sg = NULL;
+	kfree(mqrq_prev->bounce_buf);
+	mqrq_prev->bounce_buf = NULL;
+}
+
 /**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
@@ -268,8 +289,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	u64 limit = BLK_BOUNCE_HIGH;
 	bool bounce = false;
 	int ret;
-	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
-	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -279,8 +298,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (!mq->queue)
 		return -ENOMEM;
 
-	mq->mqrq_cur = mqrq_cur;
-	mq->mqrq_prev = mqrq_prev;
+	mq->mqrq_cur = &mq->mqrq[0];
+	mq->mqrq_prev = &mq->mqrq[1];
 	mq->queue->queuedata = mq;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -336,27 +355,13 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 
 	if (IS_ERR(mq->thread)) {
 		ret = PTR_ERR(mq->thread);
-		goto free_bounce_sg;
+		goto cleanup_queue;
 	}
 
 	return 0;
- free_bounce_sg:
-	kfree(mqrq_cur->bounce_sg);
-	mqrq_cur->bounce_sg = NULL;
-	kfree(mqrq_prev->bounce_sg);
-	mqrq_prev->bounce_sg = NULL;
 
  cleanup_queue:
-	kfree(mqrq_cur->sg);
-	mqrq_cur->sg = NULL;
-	kfree(mqrq_cur->bounce_buf);
-	mqrq_cur->bounce_buf = NULL;
-
-	kfree(mqrq_prev->sg);
-	mqrq_prev->sg = NULL;
-	kfree(mqrq_prev->bounce_buf);
-	mqrq_prev->bounce_buf = NULL;
-
+	mmc_queue_reqs_free_bufs(mq);
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -365,8 +370,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 {
 	struct request_queue *q = mq->queue;
 	unsigned long flags;
-	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
 
 	/* Make sure the queue isn't suspended, as that will deadlock */
 	mmc_queue_resume(mq);
@@ -380,23 +383,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	kfree(mqrq_cur->bounce_sg);
-	mqrq_cur->bounce_sg = NULL;
-
-	kfree(mqrq_cur->sg);
-	mqrq_cur->sg = NULL;
-
-	kfree(mqrq_cur->bounce_buf);
-	mqrq_cur->bounce_buf = NULL;
-
-	kfree(mqrq_prev->bounce_sg);
-	mqrq_prev->bounce_sg = NULL;
-
-	kfree(mqrq_prev->sg);
-	mqrq_prev->sg = NULL;
-
-	kfree(mqrq_prev->bounce_buf);
-	mqrq_prev->bounce_buf = NULL;
+	mmc_queue_reqs_free_bufs(mq);
 
 	mq->card = NULL;
 }
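
The consolidation above works because kfree(NULL) is a no-op and each pointer is reset to NULL right after it is freed, so one helper can serve both the mmc_init_queue() error path (where only some of the buffers may have been allocated) and the mmc_cleanup_queue() teardown path. The following is a minimal user-space sketch of that pattern, not kernel code: all names are hypothetical, and plain malloc()/free() stand in for the kernel allocators.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct mmc_queue_req: just the three buffers the
 * patch's helper frees. */
struct req {
	void *sg;
	void *bounce_sg;
	void *bounce_buf;
};

/* Mirrors the role of mmc_queue_reqs_free_bufs(): free(NULL) is a
 * no-op and every pointer is cleared after freeing, so this is safe
 * to call on partially allocated state and safe to call twice. */
static void reqs_free_bufs(struct req *reqs, int n)
{
	for (int i = 0; i < n; i++) {
		free(reqs[i].bounce_sg);
		reqs[i].bounce_sg = NULL;
		free(reqs[i].sg);
		reqs[i].sg = NULL;
		free(reqs[i].bounce_buf);
		reqs[i].bounce_buf = NULL;
	}
}

/* Mirrors the shape of the init path after the patch: any failure
 * jumps to a single label and the one helper undoes whatever was
 * allocated so far. */
static int init_reqs(struct req *reqs, int n)
{
	for (int i = 0; i < n; i++) {
		reqs[i].sg = malloc(64);
		if (!reqs[i].sg)
			goto cleanup;
	}
	return 0;

cleanup:
	reqs_free_bufs(reqs, n);	/* frees only what was allocated */
	return -1;
}

int main(void)
{
	struct req reqs[2] = { 0 };

	if (init_reqs(reqs, 2) == 0)
		puts("allocated");
	reqs_free_bufs(reqs, 2);	/* teardown reuses the same helper */
	return 0;
}

Because the helper tolerates NULL pointers, the patch can also drop the separate free_bounce_sg label: the error path no longer needs to know which allocation failed, which is what lets the goto target collapse to a single cleanup_queue label.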