queue.c

/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
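
/*
 * Upper bound on the bounce buffer size. mmc_queue_calc_bouncesz() clamps it
 * further to the host's request, segment and block-count limits, and disables
 * bouncing entirely if the result would be a single block or less.
 */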
#define MMC_QUEUE_BOUNCESZ 65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue *mq = q->queuedata;

        if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
                return BLKPREP_KILL;

        req->rq_flags |= RQF_DONTPREP;

        return BLKPREP_OK;
}
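
/*
 * Request slot handling: each mmc_queue_req in the shared mqrq array has a
 * fixed task_id, and mq->qslots is a bitmap of the slots currently in use.
 * mmc_queue_req_find() claims the first free slot (ffz) for a request and
 * mmc_queue_req_free() releases it again; mq->qcnt counts claimed slots.
 */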
struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
                                         struct request *req)
{
        struct mmc_queue_req *mqrq;
        int i = ffz(mq->qslots);

        if (i >= mq->qdepth)
                return NULL;

        mqrq = &mq->mqrq[i];
        WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
                test_bit(mqrq->task_id, &mq->qslots));
        mqrq->req = req;
        mq->qcnt += 1;
        __set_bit(mqrq->task_id, &mq->qslots);

        return mqrq;
}

void mmc_queue_req_free(struct mmc_queue *mq,
                        struct mmc_queue_req *mqrq)
{
        WARN_ON(!mqrq->req || mq->qcnt < 1 ||
                !test_bit(mqrq->task_id, &mq->qslots));
        mqrq->req = NULL;
        mq->qcnt -= 1;
        __clear_bit(mqrq->task_id, &mq->qslots);
}
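
/*
 * Each queue has its own mmcqd kernel thread. The thread pulls requests off
 * the block layer dispatch queue and hands them to mmc_blk_issue_rq(). When
 * there is nothing to do it releases thread_sem and sleeps, which is also
 * what lets mmc_queue_suspend() take the semaphore and park the queue.
 */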
static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;
        struct mmc_context_info *cntx = &mq->card->host->context_info;

        current->flags |= PF_MEMALLOC;

        down(&mq->thread_sem);
        do {
                struct request *req;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
                mq->asleep = false;
                cntx->is_waiting_last_req = false;
                cntx->is_new_req = false;
                if (!req) {
                        /*
                         * Dispatch queue is empty so set flags for
                         * mmc_request_fn() to wake us up.
                         */
                        if (mq->qcnt)
                                cntx->is_waiting_last_req = true;
                        else
                                mq->asleep = true;
                }
                spin_unlock_irq(q->queue_lock);

                if (req || mq->qcnt) {
                        set_current_state(TASK_RUNNING);
                        mmc_blk_issue_rq(mq, req);
                        cond_resched();
                } else {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                }
        } while (1);
        up(&mq->thread_sem);

        return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
        struct mmc_context_info *cntx;

        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->rq_flags |= RQF_QUIET;
                        __blk_end_request_all(req, -EIO);
                }
                return;
        }

        cntx = &mq->card->host->context_info;

        if (cntx->is_waiting_last_req) {
                cntx->is_new_req = true;
                wake_up_interruptible(&cntx->wait);
        }

        if (mq->asleep)
                wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len)
{
        struct scatterlist *sg;

        sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
        if (sg)
                sg_init_table(sg, sg_len);

        return sg;
}
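
/*
 * Advertise the card's discard capabilities to the block layer. Note that
 * discard_granularity is in bytes (pref_erase is in 512-byte sectors, hence
 * the << 9), while the maximum discard size is in sectors.
 */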
static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        unsigned max_discard;

        max_discard = mmc_calc_max_discard(card);
        if (!max_discard)
                return;

        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        blk_queue_max_discard_sectors(q, max_discard);
        q->limits.discard_granularity = card->pref_erase << 9;

        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = 0;
        if (mmc_can_secure_erase_trim(card))
                queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
        kfree(mqrq->bounce_sg);
        mqrq->bounce_sg = NULL;

        kfree(mqrq->sg);
        mqrq->sg = NULL;

        kfree(mqrq->bounce_buf);
        mqrq->bounce_buf = NULL;
}

static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
{
        int i;

        for (i = 0; i < qdepth; i++)
                mmc_queue_req_free_bufs(&mqrq[i]);
}

static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
{
        mmc_queue_reqs_free_bufs(mqrq, qdepth);
        kfree(mqrq);
}

static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
{
        struct mmc_queue_req *mqrq;
        int i;

        mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
        if (mqrq) {
                for (i = 0; i < qdepth; i++)
                        mqrq[i].task_id = i;
        }

        return mqrq;
}
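
/*
 * For hosts limited to a single segment, each slot gets a contiguous bounce
 * buffer: bounce_sg has room for one entry per 512-byte block and is used to
 * map the request, while sg is a single entry covering the bounce buffer
 * itself (see mmc_queue_map_sg()).
 */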
static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
                                       unsigned int bouncesz)
{
        int i;

        for (i = 0; i < qdepth; i++) {
                mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                if (!mqrq[i].bounce_buf)
                        return -ENOMEM;

                mqrq[i].sg = mmc_alloc_sg(1);
                if (!mqrq[i].sg)
                        return -ENOMEM;

                mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
                if (!mqrq[i].bounce_sg)
                        return -ENOMEM;
        }

        return 0;
}

static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
                                   unsigned int bouncesz)
{
        int ret;

        ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
        if (ret)
                mmc_queue_reqs_free_bufs(mqrq, qdepth);

        return !ret;
}
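
/*
 * Work out how large a bounce buffer is worth allocating: start from
 * MMC_QUEUE_BOUNCESZ and clamp to the host's maximum request size, segment
 * size and block count. Bouncing is only used for hosts restricted to a
 * single segment that have not opted out with MMC_CAP_NO_BOUNCE_BUFF, and is
 * not worthwhile for a single block or less.
 */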
static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
{
        unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

        if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
                return 0;

        if (bouncesz > host->max_req_size)
                bouncesz = host->max_req_size;
        if (bouncesz > host->max_seg_size)
                bouncesz = host->max_seg_size;
        if (bouncesz > host->max_blk_count * 512)
                bouncesz = host->max_blk_count * 512;

        if (bouncesz <= 512)
                return 0;

        return bouncesz;
}

static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
                               int max_segs)
{
        int i;

        for (i = 0; i < qdepth; i++) {
                mqrq[i].sg = mmc_alloc_sg(max_segs);
                if (!mqrq[i].sg)
                        return -ENOMEM;
        }

        return 0;
}
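
/*
 * The mqrq slots and their buffers are shared per card rather than per
 * queue. The depth of 2 chosen in mmc_queue_alloc_shared_queue() appears to
 * be intended to let one request be prepared while another is in flight.
 */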
void mmc_queue_free_shared_queue(struct mmc_card *card)
{
        if (card->mqrq) {
                mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
                card->mqrq = NULL;
        }
}

static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
{
        struct mmc_host *host = card->host;
        struct mmc_queue_req *mqrq;
        unsigned int bouncesz;
        int ret = 0;

        if (card->mqrq)
                return -EINVAL;

        mqrq = mmc_queue_alloc_mqrqs(qdepth);
        if (!mqrq)
                return -ENOMEM;

        card->mqrq = mqrq;
        card->qdepth = qdepth;

        bouncesz = mmc_queue_calc_bouncesz(host);

        if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
                bouncesz = 0;
                pr_warn("%s: unable to allocate bounce buffers\n",
                        mmc_card_name(card));
        }

        card->bouncesz = bouncesz;

        if (!bouncesz) {
                ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
                if (ret)
                        goto out_err;
        }

        return ret;

out_err:
        mmc_queue_free_shared_queue(card);
        return ret;
}

int mmc_queue_alloc_shared_queue(struct mmc_card *card)
{
        return __mmc_queue_alloc_shared_queue(card, 2);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret = -ENOMEM;

        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request_fn, lock);
        if (!mq->queue)
                return -ENOMEM;

        mq->mqrq = card->mqrq;
        mq->qdepth = card->qdepth;
        mq->queue->queuedata = mq;

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

        if (card->bouncesz) {
                blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
                blk_queue_max_segments(mq->queue, card->bouncesz / 512);
                blk_queue_max_segment_size(mq->queue, card->bouncesz);
        } else {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);
        }

        sema_init(&mq->thread_sem, 1);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                                 host->index, subname ? subname : "");
        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
                goto cleanup_queue;
        }

        return 0;

cleanup_queue:
        mq->mqrq = NULL;
        blk_cleanup_queue(mq->queue);
        return ret;
}
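
/*
 * Tear down a queue: clearing queuedata and restarting the queue makes any
 * requests that are still (or subsequently) queued fall into the !mq branch
 * of mmc_request_fn(), where they are completed with -EIO.
 */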
void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);

        /* Then terminate our worker thread */
        kthread_stop(mq->thread);

        /* Empty the queue */
        spin_lock_irqsave(q->queue_lock, flags);
        q->queuedata = NULL;
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        mq->mqrq = NULL;
        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (!mq->suspended) {
                mq->suspended |= true;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

                down(&mq->thread_sem);
        }
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (mq->suspended) {
                mq->suspended = false;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}

/*
 * Prepare the sg list(s) to be handed off to the host driver. If a bounce
 * buffer is in use, the request is mapped through bounce_sg and the slot's
 * single-entry sg list is pointed at the bounce buffer instead.
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        unsigned int sg_len;
        size_t buflen;
        struct scatterlist *sg;
        int i;

        if (!mqrq->bounce_buf)
                return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

        sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

        mqrq->bounce_sg_len = sg_len;

        buflen = 0;
        for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
                buflen += sg->length;

        sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

        return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != WRITE)
                return;

        sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != READ)
                return;

        sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}