stm32-dma.c

/*
 * Driver for STM32 DMA controller
 *
 * Inspired by dma-jz4740.c and tegra20-apb-dma.c
 *
 * Copyright (C) M'boumba Cedric Madianga 2015
 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 *
 * License terms: GNU General Public License (GPL), version 2
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define STM32_DMA_LISR 0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR 0x0004 /* DMA High Int Status Reg */
#define STM32_DMA_LIFCR 0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR 0x000c /* DMA High Int Flag Clear Reg */
#define STM32_DMA_TCI BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_HTI BIT(4) /* Half Transfer Interrupt */
#define STM32_DMA_TEI BIT(3) /* Transfer Error Interrupt */
#define STM32_DMA_DMEI BIT(2) /* Direct Mode Error Interrupt */
#define STM32_DMA_FEI BIT(0) /* FIFO Error Interrupt */
#define STM32_DMA_MASKI (STM32_DMA_TCI \
			 | STM32_DMA_TEI \
			 | STM32_DMA_DMEI \
			 | STM32_DMA_FEI)

/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x) (0x0010 + 0x18 * (x)) /* x = 0..7 */
#define STM32_DMA_SCR_REQ(n) ((n & 0x7) << 25)
#define STM32_DMA_SCR_MBURST_MASK GENMASK(24, 23)
#define STM32_DMA_SCR_MBURST(n) ((n & 0x3) << 23)
#define STM32_DMA_SCR_PBURST_MASK GENMASK(22, 21)
#define STM32_DMA_SCR_PBURST(n) ((n & 0x3) << 21)
#define STM32_DMA_SCR_PL_MASK GENMASK(17, 16)
#define STM32_DMA_SCR_PL(n) ((n & 0x3) << 16)
#define STM32_DMA_SCR_MSIZE_MASK GENMASK(14, 13)
#define STM32_DMA_SCR_MSIZE(n) ((n & 0x3) << 13)
#define STM32_DMA_SCR_PSIZE_MASK GENMASK(12, 11)
#define STM32_DMA_SCR_PSIZE(n) ((n & 0x3) << 11)
#define STM32_DMA_SCR_PSIZE_GET(n) ((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n) ((n & 0x3) << 6)
#define STM32_DMA_SCR_CT BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS BIT(15) /* Peripheral inc offset size */
#define STM32_DMA_SCR_MINC BIT(10) /* Memory increment mode */
#define STM32_DMA_SCR_PINC BIT(9) /* Peripheral increment mode */
#define STM32_DMA_SCR_CIRC BIT(8) /* Circular mode */
#define STM32_DMA_SCR_PFCTRL BIT(5) /* Peripheral Flow Controller */
#define STM32_DMA_SCR_TCIE BIT(4) /* Transfer Complete Int Enable */
#define STM32_DMA_SCR_TEIE BIT(2) /* Transfer Error Int Enable */
#define STM32_DMA_SCR_DMEIE BIT(1) /* Direct Mode Err Int Enable */
#define STM32_DMA_SCR_EN BIT(0) /* Stream Enable */
#define STM32_DMA_SCR_CFG_MASK (STM32_DMA_SCR_PINC \
				| STM32_DMA_SCR_MINC \
				| STM32_DMA_SCR_PINCOS \
				| STM32_DMA_SCR_PL_MASK)
#define STM32_DMA_SCR_IRQ_MASK (STM32_DMA_SCR_TCIE \
				| STM32_DMA_SCR_TEIE \
				| STM32_DMA_SCR_DMEIE)

/* DMA Stream x number of data register */
#define STM32_DMA_SNDTR(x) (0x0014 + 0x18 * (x))

/* DMA stream peripheral address register */
#define STM32_DMA_SPAR(x) (0x0018 + 0x18 * (x))

/* DMA stream x memory 0 address register */
#define STM32_DMA_SM0AR(x) (0x001c + 0x18 * (x))

/* DMA stream x memory 1 address register */
#define STM32_DMA_SM1AR(x) (0x0020 + 0x18 * (x))

/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x) (0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK GENMASK(1, 0)
#define STM32_DMA_SFCR_FTH(n) (n & STM32_DMA_SFCR_FTH_MASK)
#define STM32_DMA_SFCR_FEIE BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK (STM32_DMA_SFCR_FEIE \
			     | STM32_DMA_SFCR_DMDIS)

/* DMA direction */
#define STM32_DMA_DEV_TO_MEM 0x00
#define STM32_DMA_MEM_TO_DEV 0x01
#define STM32_DMA_MEM_TO_MEM 0x02

/* DMA priority level */
#define STM32_DMA_PRIORITY_LOW 0x00
#define STM32_DMA_PRIORITY_MEDIUM 0x01
#define STM32_DMA_PRIORITY_HIGH 0x02
#define STM32_DMA_PRIORITY_VERY_HIGH 0x03

/* DMA FIFO threshold selection */
#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL 0x00
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL 0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL 0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL 0x03

#define STM32_DMA_MAX_DATA_ITEMS 0xffff
/*
 * A valid transfer runs from @0 to @0xFFFE, which can leave an unaligned
 * scatter-gather element at the boundary. It is therefore safer to round
 * this value down to the FIFO size (16 bytes).
 */
#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS \
	ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)

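/*
 * For reference: ALIGN_DOWN(0xffff, 16) = 0xfff0 (65520), so every
 * scatter-gather chunk programmed into SNDTR stays a whole number of
 * 16-byte FIFO fills.
 */
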
#define STM32_DMA_MAX_CHANNELS 0x08
#define STM32_DMA_MAX_REQUEST_ID 0x08
#define STM32_DMA_MAX_DATA_PARAM 0x03
#define STM32_DMA_FIFO_SIZE 16 /* FIFO is 16 bytes */
#define STM32_DMA_MIN_BURST 4
#define STM32_DMA_MAX_BURST 16

/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n) ((n) & STM32_DMA_THRESHOLD_FTR_MASK)

enum stm32_dma_width {
	STM32_DMA_BYTE,
	STM32_DMA_HALF_WORD,
	STM32_DMA_WORD,
};

enum stm32_dma_burst_size {
	STM32_DMA_BURST_SINGLE,
	STM32_DMA_BURST_INCR4,
	STM32_DMA_BURST_INCR8,
	STM32_DMA_BURST_INCR16,
};

/**
 * struct stm32_dma_cfg - STM32 DMA custom configuration
 * @channel_id: channel ID
 * @request_line: DMA request
 * @stream_config: 32bit mask specifying the DMA channel configuration
 * @features: 32bit mask specifying the DMA Feature list
 */
struct stm32_dma_cfg {
	u32 channel_id;
	u32 request_line;
	u32 stream_config;
	u32 features;
};

struct stm32_dma_chan_reg {
	u32 dma_lisr;
	u32 dma_hisr;
	u32 dma_lifcr;
	u32 dma_hifcr;
	u32 dma_scr;
	u32 dma_sndtr;
	u32 dma_spar;
	u32 dma_sm0ar;
	u32 dma_sm1ar;
	u32 dma_sfcr;
};

struct stm32_dma_sg_req {
	u32 len;
	struct stm32_dma_chan_reg chan_reg;
};

struct stm32_dma_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	u32 num_sgs;
	struct stm32_dma_sg_req sg_req[];
};

struct stm32_dma_chan {
	struct virt_dma_chan vchan;
	bool config_init;
	bool busy;
	u32 id;
	u32 irq;
	struct stm32_dma_desc *desc;
	u32 next_sg;
	struct dma_slave_config dma_sconfig;
	struct stm32_dma_chan_reg chan_reg;
	u32 threshold;
	u32 mem_burst;
	u32 mem_width;
};

struct stm32_dma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	struct reset_control *rst;
	bool mem2mem;
	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};

static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_dma_device,
			    ddev);
}

static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_dma_chan, vchan.chan);
}

static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_dma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_dma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}

static struct stm32_dma_desc *stm32_dma_alloc_desc(u32 num_sgs)
{
	return kzalloc(sizeof(struct stm32_dma_desc) +
		       sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_NOWAIT);
}

static int stm32_dma_get_width(struct stm32_dma_chan *chan,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return STM32_DMA_BYTE;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return STM32_DMA_HALF_WORD;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return STM32_DMA_WORD;
	default:
		dev_err(chan2dev(chan), "Dma bus width not supported\n");
		return -EINVAL;
	}
}

static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
						       u32 threshold)
{
	enum dma_slave_buswidth max_width;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
		max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	else
		max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	while ((buf_len < max_width || buf_len % max_width) &&
	       max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
		max_width = max_width >> 1;

	return max_width;
}

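/*
 * Worked example of the width selection above (illustrative, assuming the
 * FULL FIFO threshold): for a 6-byte buffer the loop starts at 4 bytes,
 * sees 6 % 4 != 0 and halves the width, then stops at 2 bytes because
 * 6 % 2 == 0, so the memory side ends up programmed for half-word accesses.
 */
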
static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
						enum dma_slave_buswidth width)
{
	u32 remaining;

	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		if (burst != 0) {
			/*
			 * If number of beats fit in several whole bursts
			 * this configuration is allowed.
			 */
			remaining = ((STM32_DMA_FIFO_SIZE / width) *
				     (threshold + 1) / 4) % burst;

			if (remaining == 0)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
	switch (threshold) {
	case STM32_DMA_FIFO_THRESHOLD_FULL:
		if (buf_len >= STM32_DMA_MAX_BURST)
			return true;
		else
			return false;
	case STM32_DMA_FIFO_THRESHOLD_HALFFULL:
		if (buf_len >= STM32_DMA_MAX_BURST / 2)
			return true;
		else
			return false;
	default:
		return false;
	}
}

static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
				    enum dma_slave_buswidth width)
{
	u32 best_burst = max_burst;

	if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
		return 0;

	while ((buf_len < best_burst * width && best_burst > 1) ||
	       !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
						    width)) {
		if (best_burst > STM32_DMA_MIN_BURST)
			best_burst = best_burst >> 1;
		else
			best_burst = 0;
	}

	return best_burst;
}

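/*
 * Worked example of the burst selection above (illustrative): buf_len = 32,
 * max_burst = 16, width = 4 bytes, FULL threshold. Sixteen beats of 4 bytes
 * need 64 bytes, more than the buffer, so the burst is halved to 8; the
 * 4 beats drained per FIFO fill at the FULL threshold are not a whole number
 * of 8-beat bursts, so it is halved again to 4, which passes both tests and
 * is returned.
 */
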
static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
{
	switch (maxburst) {
	case 0:
	case 1:
		return STM32_DMA_BURST_SINGLE;
	case 4:
		return STM32_DMA_BURST_INCR4;
	case 8:
		return STM32_DMA_BURST_INCR8;
	case 16:
		return STM32_DMA_BURST_INCR16;
	default:
		dev_err(chan2dev(chan), "Dma burst size not supported\n");
		return -EINVAL;
	}
}

static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
				      u32 src_burst, u32 dst_burst)
{
	chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

	if (!src_burst && !dst_burst) {
		/* Using direct mode */
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
	} else {
		/* Using FIFO mode */
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
	}
}

static int stm32_dma_slave_config(struct dma_chan *c,
				  struct dma_slave_config *config)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	memcpy(&chan->dma_sconfig, config, sizeof(*config));

	chan->config_init = true;

	return 0;
}

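/*
 * Illustrative client-side sketch (not part of this driver): a peripheral
 * driver configures the channel through the generic dmaengine API before
 * preparing any descriptor. The register address and widths below are
 * hypothetical placeholders.
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr       = periph_rx_fifo_phys,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.src_maxburst   = 1,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * dmaengine_slave_config() ends up in stm32_dma_slave_config() above, which
 * only caches the configuration; it is applied when a transfer is prepared.
 */
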
static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 flags, dma_isr;

	/*
	 * Read "flags" from DMA_xISR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	if (chan->id & 4)
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
	else
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);

	flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	return flags & STM32_DMA_MASKI;
}

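/*
 * Worked example of the offset computation above (illustrative): stream 5
 * has bit 2 of its index set, so HISR is read; the shift is
 * ((5 & 2) << 3) | ((5 & 1) * 6) = 0 | 6 = 6, i.e. its flags live in
 * bits 6..11, while stream 6 yields ((6 & 2) << 3) | 0 = 16, i.e.
 * bits 16..21.
 */
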
static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_ifcr;

	/*
	 * Write "flags" to the DMA_xIFCR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	flags &= STM32_DMA_MASKI;
	dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	if (chan->id & 4)
		stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
	else
		stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
}

static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	u32 dma_scr, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_EN) {
		dma_scr &= ~STM32_DMA_SCR_EN;
		stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr);

		do {
			dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
			dma_scr &= STM32_DMA_SCR_EN;
			if (!dma_scr)
				break;

			if (time_after_eq(jiffies, timeout)) {
				dev_err(chan2dev(chan), "%s: timeout!\n",
					__func__);
				return -EBUSY;
			}
			cond_resched();
		} while (1);
	}

	return 0;
}

static void stm32_dma_stop(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, dma_sfcr, status;
	int ret;

	/* Disable interrupts */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
	dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);

	/* Disable DMA */
	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_dma_irq_clear(chan, status);
	}

	chan->busy = false;
}

static int stm32_dma_terminate_all(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (chan->busy) {
		stm32_dma_stop(chan);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void stm32_dma_synchronize(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
	u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
	u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
	u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
	u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	dev_dbg(chan2dev(chan), "SCR: 0x%08x\n", scr);
	dev_dbg(chan2dev(chan), "NDTR: 0x%08x\n", ndtr);
	dev_dbg(chan2dev(chan), "SPAR: 0x%08x\n", spar);
	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
	dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);

static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_dma_sg_req *sg_req;
	struct stm32_dma_chan_reg *reg;
	u32 status;
	int ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	if (!chan->desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;

		chan->desc = to_stm32_dma_desc(vdesc);
		chan->next_sg = 0;
	}

	if (chan->next_sg == chan->desc->num_sgs)
		chan->next_sg = 0;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	reg = &sg_req->chan_reg;

	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);

	chan->next_sg++;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (chan->desc->cyclic)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* Start DMA */
	reg->dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);

	chan->busy = true;

	dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_sm0ar, dma_sm1ar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_DBM) {
		if (chan->next_sg == chan->desc->num_sgs)
			chan->next_sg = 0;

		sg_req = &chan->desc->sg_req[chan->next_sg];

		if (dma_scr & STM32_DMA_SCR_CT) {
			dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
			stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
			dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
		} else {
			dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
			stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
			dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
		}
	}
}

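/*
 * Note on the double-buffer handling above (illustrative): in double buffer
 * mode the hardware alternates between SM0AR and SM1AR at the end of each
 * period, and the CT bit reports which one is currently in use. The code
 * therefore reprograms the address register that is *not* in use: with, say,
 * a 4-period cyclic buffer, while period N uses one memory target the
 * address of period N+1 is written into the other, so the stream never
 * stops between periods.
 */
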
static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
{
	if (chan->desc) {
		if (chan->desc->cyclic) {
			vchan_cyclic_callback(&chan->desc->vdesc);
			chan->next_sg++;
			stm32_dma_configure_next_sg(chan);
		} else {
			chan->busy = false;
			if (chan->next_sg == chan->desc->num_sgs) {
				list_del(&chan->desc->vdesc.node);
				vchan_cookie_complete(&chan->desc->vdesc);
				chan->desc = NULL;
			}
			stm32_dma_start_transfer(chan);
		}
	}
}

static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
	struct stm32_dma_chan *chan = devid;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 status, scr;

	spin_lock(&chan->vchan.lock);

	status = stm32_dma_irq_status(chan);
	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));

	if (status & STM32_DMA_TCI) {
		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
		if (scr & STM32_DMA_SCR_TCIE)
			stm32_dma_handle_chan_done(chan);
		status &= ~STM32_DMA_TCI;
	}
	if (status & STM32_DMA_HTI) {
		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
		status &= ~STM32_DMA_HTI;
	}
	if (status & STM32_DMA_FEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
		status &= ~STM32_DMA_FEI;
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "FIFO Error\n");
		else
			dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
	}
	if (status) {
		stm32_dma_irq_clear(chan, status);
		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "chan disabled by HW\n");
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}

static void stm32_dma_issue_pending(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
		dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
		stm32_dma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
				    enum dma_transfer_direction direction,
				    enum dma_slave_buswidth *buswidth,
				    u32 buf_len)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	int src_bus_width, dst_bus_width;
	int src_burst_size, dst_burst_size;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 dma_scr, threshold;

	src_addr_width = chan->dma_sconfig.src_addr_width;
	dst_addr_width = chan->dma_sconfig.dst_addr_width;
	src_maxburst = chan->dma_sconfig.src_maxburst;
	dst_maxburst = chan->dma_sconfig.dst_maxburst;
	threshold = chan->threshold;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* Set device data size */
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set device burst size */
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  threshold,
							  dst_addr_width);

		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		/* Set memory data size */
		src_addr_width = stm32_dma_get_max_width(buf_len, threshold);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set memory burst size */
		src_maxburst = STM32_DMA_MAX_BURST;
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  threshold,
							  src_addr_width);
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
			STM32_DMA_SCR_PSIZE(dst_bus_width) |
			STM32_DMA_SCR_MSIZE(src_bus_width) |
			STM32_DMA_SCR_PBURST(dst_burst_size) |
			STM32_DMA_SCR_MBURST(src_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
		*buswidth = dst_addr_width;
		break;

	case DMA_DEV_TO_MEM:
		/* Set device data size */
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set device burst size */
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  threshold,
							  src_addr_width);
		chan->mem_burst = src_best_burst;
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		/* Set memory data size */
		dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set memory burst size */
		dst_maxburst = STM32_DMA_MAX_BURST;
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  threshold,
							  dst_addr_width);
		chan->mem_burst = dst_best_burst;
		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
			STM32_DMA_SCR_PSIZE(src_bus_width) |
			STM32_DMA_SCR_MSIZE(dst_bus_width) |
			STM32_DMA_SCR_PBURST(src_burst_size) |
			STM32_DMA_SCR_MBURST(dst_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
		*buswidth = chan->dma_sconfig.src_addr_width;
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);

	/* Set DMA control register */
	chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
			STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
			STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
	chan->chan_reg.dma_scr |= dma_scr;

	return 0;
}

static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
{
	memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
}

static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	u32 sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 nb_data_items;
	int i, ret;

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (sg_len < 1) {
		dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	desc = stm32_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	/* Set peripheral flow controller */
	if (chan->dma_sconfig.device_fc)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
	else
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
					       sg_dma_len(sg));
		if (ret < 0)
			goto err;

		desc->sg_req[i].len = sg_dma_len(sg);

		nb_data_items = desc->sg_req[i].len / buswidth;
		if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
			dev_err(chan2dev(chan), "nb items not supported\n");
			goto err;
		}

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
	}

	desc->num_sgs = sg_len;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

err:
	kfree(desc);
	return NULL;
}

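/*
 * Illustrative end-to-end usage of the slave path (client side, not part of
 * this driver; "my_dev", "rx" and my_done_callback are hypothetical):
 *
 *	chan = dma_request_chan(my_dev, "rx");
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
 *				      DMA_PREP_INTERRUPT);
 *	txd->callback = my_done_callback;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 * dmaengine_prep_slave_sg() lands in stm32_dma_prep_slave_sg() above, which
 * precomputes one register set per scatterlist element; the transfer only
 * starts once dma_async_issue_pending() reaches stm32_dma_issue_pending().
 */
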
static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, nb_data_items;
	int i, ret;

	if (!buf_len || !period_len) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	/*
	 * Additional requests can be queued as long as the DMA has not been
	 * started; the driver will then loop over all of them. Once the DMA
	 * is started, new requests can only be queued after terminating it.
	 */
	if (chan->busy) {
		dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
		return NULL;
	}

	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len);
	if (ret < 0)
		return NULL;

	nb_data_items = period_len / buswidth;
	if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
		dev_err(chan2dev(chan), "number of items not supported\n");
		return NULL;
	}

	/* Enable Circular mode or double buffer mode */
	if (buf_len == period_len)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
	else
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;

	/* Clear periph ctrl if client set it */
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	num_periods = buf_len / period_len;

	desc = stm32_dma_alloc_desc(num_periods);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

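/*
 * Typical caller (illustrative): cyclic transfers are prepared through
 * dmaengine_prep_dma_cyclic(), e.g. by an audio or UART driver feeding a
 * ring buffer. With buf_len == period_len the stream runs in circular mode
 * on a single buffer; with several periods the double-buffer mode above
 * ping-pongs between SM0AR and SM1AR, advancing one period per transfer
 * complete interrupt.
 */
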
static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
	struct dma_chan *c, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	enum dma_slave_buswidth max_width;
	struct stm32_dma_desc *desc;
	size_t xfer_count, offset;
	u32 num_sgs, best_burst, dma_burst, threshold;
	int i;

	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
	desc = stm32_dma_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	threshold = chan->threshold;

	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
		xfer_count = min_t(size_t, len - offset,
				   STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

		/* Compute best burst size */
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
						      threshold, max_width);
		dma_burst = stm32_dma_get_burst(chan, best_burst);

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr =
			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
			STM32_DMA_SCR_PBURST(dma_burst) |
			STM32_DMA_SCR_MBURST(dma_burst) |
			STM32_DMA_SCR_MINC |
			STM32_DMA_SCR_PINC |
			STM32_DMA_SCR_TCIE |
			STM32_DMA_SCR_TEIE;
		desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
		desc->sg_req[i].chan_reg.dma_sfcr |=
			STM32_DMA_SFCR_FTH(threshold);
		desc->sg_req[i].chan_reg.dma_spar = src + offset;
		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
		desc->sg_req[i].len = xfer_count;
	}

	desc->num_sgs = num_sgs;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

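/*
 * Worked example of the chunking above (illustrative): a 100000-byte memcpy
 * gives num_sgs = DIV_ROUND_UP(100000, 65520) = 2, i.e. one 65520-byte chunk
 * followed by one 34480-byte chunk, each programmed as its own register set
 * with the source in SPAR and the destination in SM0AR.
 */
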
static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
{
	u32 dma_scr, width, ndtr;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);

	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
	ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

	return ndtr << width;
}

static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
				     struct stm32_dma_desc *desc,
				     u32 next_sg)
{
	u32 modulo, burst_size;
	u32 residue = 0;
	int i;

	/*
	 * In cyclic mode, for the last period, residue = remaining bytes from
	 * NDTR
	 */
	if (chan->desc->cyclic && next_sg == 0) {
		residue = stm32_dma_get_remaining_bytes(chan);
		goto end;
	}

	/*
	 * For all other periods in cyclic mode, and in sg mode,
	 * residue = remaining bytes from NDTR + remaining periods/sg to be
	 * transferred
	 */
	for (i = next_sg; i < desc->num_sgs; i++)
		residue += desc->sg_req[i].len;
	residue += stm32_dma_get_remaining_bytes(chan);

end:
	if (!chan->mem_burst)
		return residue;

	burst_size = chan->mem_burst * chan->mem_width;
	modulo = residue % burst_size;
	if (modulo)
		residue = residue - modulo + burst_size;

	return residue;
}

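/*
 * Worked example of the final rounding above (illustrative): with a memory
 * burst of 4 beats of 2 bytes, burst_size is 8; a raw residue of 10 bytes
 * (modulo 2) is reported as 10 - 2 + 8 = 16, i.e. rounded up to the next
 * burst boundary, matching the DMA_RESIDUE_GRANULARITY_BURST granularity
 * advertised in probe().
 */
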
static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_dma_desc_residue(chan, chan->desc,
						 chan->next_sg);
	else if (vdesc)
		residue = stm32_dma_desc_residue(chan,
						 to_stm32_dma_desc(vdesc), 0);
	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}

static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	int ret;

	chan->config_init = false;
	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		clk_disable_unprepare(dmadev->clk);

	return ret;
}

static void stm32_dma_free_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);

	if (chan->busy) {
		spin_lock_irqsave(&chan->vchan.lock, flags);
		stm32_dma_stop(chan);
		chan->desc = NULL;
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}

	clk_disable_unprepare(dmadev->clk);

	vchan_free_chan_resources(to_virt_chan(c));
}

static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
}

static void stm32_dma_set_config(struct stm32_dma_chan *chan,
				 struct stm32_dma_cfg *cfg)
{
	stm32_dma_clear_reg(&chan->chan_reg);

	chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);

	/* Enable Interrupts */
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;

	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
}

static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct stm32_dma_device *dmadev = ofdma->of_dma_data;
	struct device *dev = dmadev->ddev.dev;
	struct stm32_dma_cfg cfg;
	struct stm32_dma_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 4) {
		dev_err(dev, "Bad number of cells\n");
		return NULL;
	}

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.features = dma_spec->args[3];

	if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
	    cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
		dev_err(dev, "Bad channel and/or request id\n");
		return NULL;
	}

	chan = &dmadev->chan[cfg.channel_id];

	c = dma_get_slave_channel(&chan->vchan.chan);
	if (!c) {
		dev_err(dev, "No more channels available\n");
		return NULL;
	}

	stm32_dma_set_config(chan, &cfg);

	return c;
}

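/*
 * Device tree view of the four cells parsed above (illustrative values, not
 * taken from a real board file): a client node would reference the
 * controller as
 *
 *	dmas = <&dma1 5 4 0x400 0x1>;
 *
 * meaning stream (channel) 5, request line 4, a stream_config mask filtered
 * through STM32_DMA_SCR_CFG_MASK (0x400 sets the MINC memory-increment bit),
 * and a features mask whose low two bits select the FIFO threshold
 * (0x1 = half full).
 */
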
static const struct of_device_id stm32_dma_of_match[] = {
	{ .compatible = "st,stm32-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma_of_match);

static int stm32_dma_probe(struct platform_device *pdev)
{
	struct stm32_dma_chan *chan;
	struct stm32_dma_device *dmadev;
	struct dma_device *dd;
	const struct of_device_id *match;
	struct resource *res;
	int i, ret;

	match = of_match_device(stm32_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dd = &dmadev->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(dmadev->clk);
	}

	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
						"st,mem2mem");

	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(dmadev->rst)) {
		reset_control_assert(dmadev->rst);
		udelay(2);
		reset_control_deassert(dmadev->rst);
	}

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
	dd->device_tx_status = stm32_dma_tx_status;
	dd->device_issue_pending = stm32_dma_issue_pending;
	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
	dd->device_config = stm32_dma_slave_config;
	dd->device_terminate_all = stm32_dma_terminate_all;
	dd->device_synchronize = stm32_dma_synchronize;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->max_burst = STM32_DMA_MAX_BURST;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (dmadev->mem2mem) {
		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
		dd->directions |= BIT(DMA_MEM_TO_MEM);
	}

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	ret = dma_async_device_register(dd);
	if (ret)
		return ret;

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_unregister;
		}
		chan->irq = res->start;
		ret = devm_request_irq(&pdev->dev, chan->irq,
				       stm32_dma_chan_irq, 0,
				       dev_name(chan2dev(chan)), chan);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_unregister;
		}
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 stm32_dma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 DMA OF registration failed %d\n", ret);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dmadev);

	dev_info(&pdev->dev, "STM32 DMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);

	return ret;
}

static struct platform_driver stm32_dma_driver = {
	.driver = {
		.name = "stm32-dma",
		.of_match_table = stm32_dma_of_match,
	},
};

static int __init stm32_dma_init(void)
{
	return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe);
}
subsys_initcall(stm32_dma_init);