queue.c

/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"

#define MMC_QUEUE_BOUNCESZ 65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue *mq = q->queuedata;

        if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
                return BLKPREP_KILL;

        req->rq_flags |= RQF_DONTPREP;

        return BLKPREP_OK;
}
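
/*
 * Claim a free slot in the shared mqrq array for @req. Returns the slot
 * that now owns the request, or NULL if every slot is already in use.
 */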
struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
                                         struct request *req)
{
        struct mmc_queue_req *mqrq;
        int i = ffz(mq->qslots);

        if (i >= mq->qdepth)
                return NULL;

        mqrq = &mq->mqrq[i];
        WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
                test_bit(mqrq->task_id, &mq->qslots));
        mqrq->req = req;
        mq->qcnt += 1;
        __set_bit(mqrq->task_id, &mq->qslots);

        return mqrq;
}
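
/*
 * Release a slot claimed by mmc_queue_req_find(): drop the request, lower
 * the in-flight count and clear the slot's bit in the qslots bitmap.
 */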
void mmc_queue_req_free(struct mmc_queue *mq,
                        struct mmc_queue_req *mqrq)
{
        WARN_ON(!mqrq->req || mq->qcnt < 1 ||
                !test_bit(mqrq->task_id, &mq->qslots));
        mqrq->req = NULL;
        mq->qcnt -= 1;
        __clear_bit(mqrq->task_id, &mq->qslots);
}
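
/*
 * Per-queue worker thread: fetch requests from the block layer and hand
 * them to mmc_blk_issue_rq(). When the dispatch queue is empty and nothing
 * is in flight, go to sleep and wait for mmc_request_fn() to wake us up.
 */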
static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;
        struct mmc_context_info *cntx = &mq->card->host->context_info;

        current->flags |= PF_MEMALLOC;

        down(&mq->thread_sem);
        do {
                struct request *req;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
                mq->asleep = false;
                cntx->is_waiting_last_req = false;
                cntx->is_new_req = false;
                if (!req) {
                        /*
                         * Dispatch queue is empty so set flags for
                         * mmc_request_fn() to wake us up.
                         */
                        if (mq->qcnt)
                                cntx->is_waiting_last_req = true;
                        else
                                mq->asleep = true;
                }
                spin_unlock_irq(q->queue_lock);

                if (req || mq->qcnt) {
                        set_current_state(TASK_RUNNING);
                        mmc_blk_issue_rq(mq, req);
                        cond_resched();
                } else {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                }
        } while (1);
        up(&mq->thread_sem);

        return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
        struct mmc_context_info *cntx;

        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->rq_flags |= RQF_QUIET;
                        __blk_end_request_all(req, -EIO);
                }
                return;
        }

        cntx = &mq->card->host->context_info;

        if (cntx->is_waiting_last_req) {
                cntx->is_new_req = true;
                wake_up_interruptible(&cntx->wait);
        }

        if (mq->asleep)
                wake_up_process(mq->thread);
}
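
/*
 * Allocate and initialise a scatterlist with @sg_len entries, or return
 * NULL if the allocation fails.
 */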
static struct scatterlist *mmc_alloc_sg(int sg_len)
{
        struct scatterlist *sg;

        sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
        if (sg)
                sg_init_table(sg, sg_len);

        return sg;
}
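
/*
 * Tell the block layer about the card's discard capabilities: the maximum
 * discard size, the preferred erase granularity and, where the card
 * supports it, secure erase/trim.
 */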
static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        unsigned max_discard;

        max_discard = mmc_calc_max_discard(card);
        if (!max_discard)
                return;

        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        blk_queue_max_discard_sectors(q, max_discard);
        q->limits.discard_granularity = card->pref_erase << 9;

        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = 0;

        if (mmc_can_secure_erase_trim(card))
                queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
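
/*
 * Free the scatterlists and bounce buffer attached to a single queue slot.
 */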
static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
        kfree(mqrq->bounce_sg);
        mqrq->bounce_sg = NULL;

        kfree(mqrq->sg);
        mqrq->sg = NULL;

        kfree(mqrq->bounce_buf);
        mqrq->bounce_buf = NULL;
}

static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
{
        int i;

        for (i = 0; i < qdepth; i++)
                mmc_queue_req_free_bufs(&mqrq[i]);
}

static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
{
        mmc_queue_reqs_free_bufs(mqrq, qdepth);
        kfree(mqrq);
}
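
/*
 * Allocate an array of @qdepth queue slots and give each one a task id
 * matching its index in the array.
 */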
static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
{
        struct mmc_queue_req *mqrq;
        int i;

        mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
        if (mqrq) {
                for (i = 0; i < qdepth; i++)
                        mqrq[i].task_id = i;
        }

        return mqrq;
}
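
/*
 * Bounce buffer support: hosts that can only handle a single segment copy
 * request data through one contiguous bounce buffer instead of using the
 * request's scatterlist directly.
 */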
#ifdef CONFIG_MMC_BLOCK_BOUNCE
static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
                                       unsigned int bouncesz)
{
        int i;

        for (i = 0; i < qdepth; i++) {
                mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                if (!mqrq[i].bounce_buf)
                        return -ENOMEM;

                mqrq[i].sg = mmc_alloc_sg(1);
                if (!mqrq[i].sg)
                        return -ENOMEM;

                mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
                if (!mqrq[i].bounce_sg)
                        return -ENOMEM;
        }

        return 0;
}
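
/*
 * Allocate bounce buffers for every slot; if any allocation fails, free
 * whatever was allocated and report that bouncing is unavailable.
 */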
static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
                                   unsigned int bouncesz)
{
        int ret;

        ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
        if (ret)
                mmc_queue_reqs_free_bufs(mqrq, qdepth);

        return !ret;
}
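
/*
 * Work out the bounce buffer size for this host, clamped by its maximum
 * request size, segment size and block count. Returns 0 when bouncing is
 * not needed (the host handles more than one segment) or not worthwhile
 * (one sector or less).
 */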
static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
{
        unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

        if (host->max_segs != 1)
                return 0;

        if (bouncesz > host->max_req_size)
                bouncesz = host->max_req_size;
        if (bouncesz > host->max_seg_size)
                bouncesz = host->max_seg_size;
        if (bouncesz > host->max_blk_count * 512)
                bouncesz = host->max_blk_count * 512;

        if (bouncesz <= 512)
                return 0;

        return bouncesz;
}
#else
static inline bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq,
                                          int qdepth, unsigned int bouncesz)
{
        return false;
}

static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
{
        return 0;
}
#endif
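
/*
 * Allocate a scatterlist of up to @max_segs entries for each slot; used
 * when no bounce buffer is in play.
 */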
static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
                               int max_segs)
{
        int i;

        for (i = 0; i < qdepth; i++) {
                mqrq[i].sg = mmc_alloc_sg(max_segs);
                if (!mqrq[i].sg)
                        return -ENOMEM;
        }

        return 0;
}
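
/*
 * Free the card's shared slot array together with any buffers attached to
 * the individual slots.
 */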
void mmc_queue_free_shared_queue(struct mmc_card *card)
{
        if (card->mqrq) {
                mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
                card->mqrq = NULL;
        }
}
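
/*
 * Allocate the card's shared slot array, then either bounce buffers or
 * per-slot scatterlists depending on the host's capabilities. The public
 * wrapper below uses a fixed queue depth of two slots.
 */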
static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
{
        struct mmc_host *host = card->host;
        struct mmc_queue_req *mqrq;
        unsigned int bouncesz;
        int ret = 0;

        if (card->mqrq)
                return -EINVAL;

        mqrq = mmc_queue_alloc_mqrqs(qdepth);
        if (!mqrq)
                return -ENOMEM;

        card->mqrq = mqrq;
        card->qdepth = qdepth;

        bouncesz = mmc_queue_calc_bouncesz(host);

        if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
                bouncesz = 0;
                pr_warn("%s: unable to allocate bounce buffers\n",
                        mmc_card_name(card));
        }

        card->bouncesz = bouncesz;

        if (!bouncesz) {
                ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
                if (ret)
                        goto out_err;
        }

        return ret;

out_err:
        mmc_queue_free_shared_queue(card);
        return ret;
}

int mmc_queue_alloc_shared_queue(struct mmc_card *card)
{
        return __mmc_queue_alloc_shared_queue(card, 2);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret = -ENOMEM;

        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request_fn, lock);
        if (!mq->queue)
                return -ENOMEM;

        mq->mqrq = card->mqrq;
        mq->qdepth = card->qdepth;
        mq->queue->queuedata = mq;

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

        if (card->bouncesz) {
                blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
                blk_queue_max_segments(mq->queue, card->bouncesz / 512);
                blk_queue_max_segment_size(mq->queue, card->bouncesz);
        } else {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);
        }

        sema_init(&mq->thread_sem, 1);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                                 host->index, subname ? subname : "");
        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
                goto cleanup_queue;
        }

        return 0;

cleanup_queue:
        mq->mqrq = NULL;
        blk_cleanup_queue(mq->queue);
        return ret;
}
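
/*
 * Tear down the queue: stop the worker thread, then restart the block
 * queue with queuedata cleared so that any requests still on it are failed
 * with -EIO by mmc_request_fn().
 */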
void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);

        /* Then terminate our worker thread */
        kthread_stop(mq->thread);

        /* Empty the queue */
        spin_lock_irqsave(q->queue_lock, flags);
        q->queuedata = NULL;
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        mq->mqrq = NULL;
        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (!mq->suspended) {
                mq->suspended |= true;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

                down(&mq->thread_sem);
        }
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (mq->suspended) {
                mq->suspended = false;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        unsigned int sg_len;
        size_t buflen;
        struct scatterlist *sg;
        int i;

        if (!mqrq->bounce_buf)
                return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

        sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

        mqrq->bounce_sg_len = sg_len;

        buflen = 0;
        for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
                buflen += sg->length;

        sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

        return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != WRITE)
                return;

        sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                          mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != READ)
                return;

        sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                            mqrq->bounce_buf, mqrq->sg[0].length);
}