tmio_mmc_dma.c

/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8

void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
        if (!host->chan_tx || !host->chan_rx)
                return;

        if (host->dma->enable)
                host->dma->enable(host, enable);
}

void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
        tmio_mmc_enable_dma(host, false);

        if (host->chan_rx)
                dmaengine_terminate_all(host->chan_rx);
        if (host->chan_tx)
                dmaengine_terminate_all(host->chan_tx);

        tmio_mmc_enable_dma(host, true);
}
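
/*
 * Set up and submit a DMA descriptor for a read request.  A single
 * unaligned sg element is redirected through the bounce buffer; requests
 * that DMA cannot handle at all fall back to PIO.
 */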
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        dma_cookie_t cookie;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << host->pdata->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

        /* The only sg element can be unaligned, use our bounce buffer then */
        if (!aligned) {
                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
        if (ret > 0)
                desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_DEV_TO_MEM, DMA_CTRL_ACK);

        if (desc) {
                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }
        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, cookie, host->mrq);

pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                tmio_mmc_enable_dma(host, false);
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
                desc, cookie, host->sg_len);
}
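
/*
 * Set up and submit a DMA descriptor for a write request.  An unaligned
 * single sg element is first copied into the bounce buffer; requests that
 * DMA cannot handle at all fall back to PIO.
 */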
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        dma_cookie_t cookie;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << host->pdata->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

        /* The only sg element can be unaligned, use our bounce buffer then */
        if (!aligned) {
                unsigned long flags;
                void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
                tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
        if (ret > 0)
                desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_MEM_TO_DEV, DMA_CTRL_ACK);

        if (desc) {
                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }
        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, cookie, host->mrq);

pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                tmio_mmc_enable_dma(host, false);
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
                desc, cookie);
}

void tmio_mmc_start_dma(struct tmio_mmc_host *host,
                        struct mmc_data *data)
{
        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx)
                        tmio_mmc_start_dma_rx(host);
        } else {
                if (host->chan_tx)
                        tmio_mmc_start_dma_tx(host);
        }
}
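
/*
 * Tasklet run once the request has been set up: pick the channel for the
 * current transfer direction, re-enable the DATAEND interrupt and kick
 * the DMA engine.
 */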
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
        struct dma_chan *chan = NULL;

        spin_lock_irq(&host->lock);

        if (host && host->data) {
                if (host->data->flags & MMC_DATA_READ)
                        chan = host->chan_rx;
                else
                        chan = host->chan_tx;
        }

        spin_unlock_irq(&host->lock);

        tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

        if (chan)
                dma_async_issue_pending(chan);
}
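
/*
 * Tasklet run on DMA completion: unmap the scatterlist and finish the
 * data portion of the request.
 */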
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

        spin_lock_irq(&host->lock);

        if (!host->data)
                goto out;

        if (host->data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_TO_DEVICE);

        tmio_mmc_do_data_irq(host);
out:
        spin_unlock_irq(&host->lock);
}
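
/*
 * Request and configure the Tx and Rx slave channels and allocate the
 * bounce buffer.  Any failure leaves the host without DMA, i.e. in PIO
 * mode.
 */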
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
        /* We can only either use DMA for both Tx and Rx or not use it at all */
        if (!host->dma || (!host->pdev->dev.of_node &&
                           (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
                return;

        if (!host->chan_tx && !host->chan_rx) {
                struct resource *res = platform_get_resource(host->pdev,
                                                             IORESOURCE_MEM, 0);
                struct dma_slave_config cfg = {};
                dma_cap_mask_t mask;
                int ret;

                if (!res)
                        return;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                host->chan_tx = dma_request_slave_channel_compat(mask,
                                        host->dma->filter, pdata->chan_priv_tx,
                                        &host->pdev->dev, "tx");
                dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
                        host->chan_tx);

                if (!host->chan_tx)
                        return;

                cfg.direction = DMA_MEM_TO_DEV;
                cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
                cfg.dst_addr_width = host->dma->dma_buswidth;
                if (!cfg.dst_addr_width)
                        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                cfg.src_addr = 0;
                ret = dmaengine_slave_config(host->chan_tx, &cfg);
                if (ret < 0)
                        goto ecfgtx;

                host->chan_rx = dma_request_slave_channel_compat(mask,
                                        host->dma->filter, pdata->chan_priv_rx,
                                        &host->pdev->dev, "rx");
                dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
                        host->chan_rx);

                if (!host->chan_rx)
                        goto ereqrx;

                cfg.direction = DMA_DEV_TO_MEM;
                cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
                cfg.src_addr_width = host->dma->dma_buswidth;
                if (!cfg.src_addr_width)
                        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                cfg.dst_addr = 0;
                ret = dmaengine_slave_config(host->chan_rx, &cfg);
                if (ret < 0)
                        goto ecfgrx;

                host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
                if (!host->bounce_buf)
                        goto ebouncebuf;

                tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
                tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
        }

        tmio_mmc_enable_dma(host, true);

        return;

ebouncebuf:
ecfgrx:
        dma_release_channel(host->chan_rx);
        host->chan_rx = NULL;
ereqrx:
ecfgtx:
        dma_release_channel(host->chan_tx);
        host->chan_tx = NULL;
}
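
/* Release both DMA channels and free the bounce buffer page */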
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }
        if (host->bounce_buf) {
                free_pages((unsigned long)host->bounce_buf, 0);
                host->bounce_buf = NULL;
        }
}