/*
 * drivers/spi/spi-fsl-dspi.c
 *
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * Freescale DSPI driver
 * This file contains a driver for the Freescale DSPI
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-fsl-dspi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/time.h>

#define DRIVER_NAME "fsl-dspi"

#define DSPI_FIFO_SIZE			4
#define DSPI_DMA_BUFSIZE		(DSPI_FIFO_SIZE * 1024)

#define SPI_MCR			0x00
#define SPI_MCR_MASTER		(1 << 31)
#define SPI_MCR_PCSIS		(0x3F << 16)
#define SPI_MCR_CLR_TXF		(1 << 11)
#define SPI_MCR_CLR_RXF		(1 << 10)

#define SPI_TCR			0x08
#define SPI_TCR_GET_TCNT(x)	(((x) & 0xffff0000) >> 16)

#define SPI_CTAR(x)		(0x0c + (((x) & 0x3) * 4))
#define SPI_CTAR_FMSZ(x)	(((x) & 0x0000000f) << 27)
#define SPI_CTAR_CPOL(x)	((x) << 26)
#define SPI_CTAR_CPHA(x)	((x) << 25)
#define SPI_CTAR_LSBFE(x)	((x) << 24)
#define SPI_CTAR_PCSSCK(x)	(((x) & 0x00000003) << 22)
#define SPI_CTAR_PASC(x)	(((x) & 0x00000003) << 20)
#define SPI_CTAR_PDT(x)		(((x) & 0x00000003) << 18)
#define SPI_CTAR_PBR(x)		(((x) & 0x00000003) << 16)
#define SPI_CTAR_CSSCK(x)	(((x) & 0x0000000f) << 12)
#define SPI_CTAR_ASC(x)		(((x) & 0x0000000f) << 8)
#define SPI_CTAR_DT(x)		(((x) & 0x0000000f) << 4)
#define SPI_CTAR_BR(x)		((x) & 0x0000000f)
#define SPI_CTAR_SCALE_BITS	0xf

#define SPI_CTAR0_SLAVE	0x0c

#define SPI_SR			0x2c
#define SPI_SR_EOQF		0x10000000
#define SPI_SR_TCFQF		0x80000000
#define SPI_SR_CLEAR		0xdaad0000

#define SPI_RSER_TFFFE		BIT(25)
#define SPI_RSER_TFFFD		BIT(24)
#define SPI_RSER_RFDFE		BIT(17)
#define SPI_RSER_RFDFD		BIT(16)

#define SPI_RSER		0x30
#define SPI_RSER_EOQFE		0x10000000
#define SPI_RSER_TCFQE		0x80000000

#define SPI_PUSHR		0x34
#define SPI_PUSHR_CMD_CONT	(1 << 15)
#define SPI_PUSHR_CONT		(SPI_PUSHR_CMD_CONT << 16)
#define SPI_PUSHR_CMD_CTAS(x)	(((x) & 0x0003) << 12)
#define SPI_PUSHR_CTAS(x)	(SPI_PUSHR_CMD_CTAS(x) << 16)
#define SPI_PUSHR_CMD_EOQ	(1 << 11)
#define SPI_PUSHR_EOQ		(SPI_PUSHR_CMD_EOQ << 16)
#define SPI_PUSHR_CMD_CTCNT	(1 << 10)
#define SPI_PUSHR_CTCNT		(SPI_PUSHR_CMD_CTCNT << 16)
#define SPI_PUSHR_CMD_PCS(x)	((1 << x) & 0x003f)
#define SPI_PUSHR_PCS(x)	(SPI_PUSHR_CMD_PCS(x) << 16)
#define SPI_PUSHR_TXDATA(x)	((x) & 0x0000ffff)

#define SPI_PUSHR_SLAVE	0x34

#define SPI_POPR		0x38
#define SPI_POPR_RXDATA(x)	((x) & 0x0000ffff)

#define SPI_TXFR0		0x3c
#define SPI_TXFR1		0x40
#define SPI_TXFR2		0x44
#define SPI_TXFR3		0x48
#define SPI_RXFR0		0x7c
#define SPI_RXFR1		0x80
#define SPI_RXFR2		0x84
#define SPI_RXFR3		0x88

#define SPI_FRAME_BITS(bits)	SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_BITS_MASK	SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_16	SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_8	SPI_CTAR_FMSZ(0x7)

#define SPI_CS_INIT		0x01
#define SPI_CS_ASSERT		0x02
#define SPI_CS_DROP		0x04

#define DMA_COMPLETION_TIMEOUT	msecs_to_jiffies(3000)

struct chip_data {
	u32 mcr_val;
	u32 ctar_val;
	u16 void_write_data;
};
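
/*
 * The controller's 4-entry FIFOs are serviced in one of three ways,
 * selected per SoC through fsl_dspi_devtype_data below:
 *
 *  - DSPI_EOQ_MODE:  queue up to DSPI_FIFO_SIZE words, tag the last one
 *    with the End-Of-Queue bit and take a single EOQF interrupt once the
 *    whole queue has been shifted out.
 *  - DSPI_TCFQ_MODE: push one word at a time and take a TCFQF interrupt
 *    per word.
 *  - DSPI_DMA_MODE:  stream 32-bit command/data words to SPI_PUSHR and
 *    from SPI_POPR through the DMA engine.
 */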
enum dspi_trans_mode {
	DSPI_EOQ_MODE = 0,
	DSPI_TCFQ_MODE,
	DSPI_DMA_MODE,
};

struct fsl_dspi_devtype_data {
	enum dspi_trans_mode trans_mode;
	u8 max_clock_factor;
};

static const struct fsl_dspi_devtype_data vf610_data = {
	.trans_mode = DSPI_DMA_MODE,
	.max_clock_factor = 2,
};

static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
};

static const struct fsl_dspi_devtype_data ls2085a_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
};

static const struct fsl_dspi_devtype_data coldfire_data = {
	.trans_mode = DSPI_EOQ_MODE,
	.max_clock_factor = 8,
};

struct fsl_dspi_dma {
	/* Length of transfer in words of DSPI_FIFO_SIZE */
	u32 curr_xfer_len;

	u32 *tx_dma_buf;
	struct dma_chan *chan_tx;
	dma_addr_t tx_dma_phys;
	struct completion cmd_tx_complete;
	struct dma_async_tx_descriptor *tx_desc;

	u32 *rx_dma_buf;
	struct dma_chan *chan_rx;
	dma_addr_t rx_dma_phys;
	struct completion cmd_rx_complete;
	struct dma_async_tx_descriptor *rx_desc;
};

struct fsl_dspi {
	struct spi_master *master;
	struct platform_device *pdev;

	struct regmap *regmap;
	int irq;
	struct clk *clk;

	struct spi_transfer *cur_transfer;
	struct spi_message *cur_msg;
	struct chip_data *cur_chip;
	size_t len;
	const void *tx;
	void *rx;
	void *rx_end;
	u16 void_write_data;
	u16 tx_cmd;
	u8 bits_per_word;
	u8 bytes_per_word;
	const struct fsl_dspi_devtype_data *devtype_data;

	wait_queue_head_t waitq;
	u32 waitflags;

	struct fsl_dspi_dma *dma;
};

static u16 dspi_pop_tx(struct fsl_dspi *dspi)
{
	u16 txdata = 0;

	if (dspi->tx) {
		if (dspi->bytes_per_word == 1)
			txdata = *(u8 *)dspi->tx;
		else /* dspi->bytes_per_word == 2 */
			txdata = *(u16 *)dspi->tx;
		dspi->tx += dspi->bytes_per_word;
	}
	dspi->len -= dspi->bytes_per_word;
	return txdata;
}
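
/*
 * Every write to SPI_PUSHR is a 32-bit word: the upper 16 bits carry the
 * command half (CTAS, CONT, EOQ, CTCNT, PCS) and the lower 16 bits carry
 * the TX data for one frame. This is why both the FIFO path and the DMA
 * buffers are filled with the u32 entries built here.
 */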
static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
	u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);

	if (dspi->len > 0)
		cmd |= SPI_PUSHR_CMD_CONT;
	return cmd << 16 | data;
}

static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
{
	if (!dspi->rx)
		return;

	/* Mask off undefined bits */
	rxdata &= (1 << dspi->bits_per_word) - 1;

	if (dspi->bytes_per_word == 1)
		*(u8 *)dspi->rx = rxdata;
	else /* dspi->bytes_per_word == 2 */
		*(u16 *)dspi->rx = rxdata;
	dspi->rx += dspi->bytes_per_word;
}

static void dspi_tx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;

	complete(&dma->cmd_tx_complete);
}

static void dspi_rx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;
	int i;

	if (dspi->rx) {
		for (i = 0; i < dma->curr_xfer_len; i++)
			dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
	}

	complete(&dma->cmd_rx_complete);
}

static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;
	int time_left;
	int i;

	for (i = 0; i < dma->curr_xfer_len; i++)
		dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);

	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
					dma->tx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->tx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->tx_desc->callback = dspi_tx_dma_callback;
	dma->tx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
					dma->rx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->rx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->rx_desc->callback = dspi_rx_dma_callback;
	dma->rx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	reinit_completion(&dspi->dma->cmd_rx_complete);
	reinit_completion(&dspi->dma->cmd_tx_complete);

	dma_async_issue_pending(dma->chan_rx);
	dma_async_issue_pending(dma->chan_tx);

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA tx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA rx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	return 0;
}

static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;
	int curr_remaining_bytes;
	int bytes_per_buffer;
	int ret = 0;

	curr_remaining_bytes = dspi->len;
	bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
	while (curr_remaining_bytes) {
		/* Check if current transfer fits the DMA buffer */
		dma->curr_xfer_len = curr_remaining_bytes
			/ dspi->bytes_per_word;
		if (dma->curr_xfer_len > bytes_per_buffer)
			dma->curr_xfer_len = bytes_per_buffer;

		ret = dspi_next_xfer_dma_submit(dspi);
		if (ret) {
			dev_err(dev, "DMA transfer failed\n");
			goto exit;
		} else {
			curr_remaining_bytes -= dma->curr_xfer_len
				* dspi->bytes_per_word;
			if (curr_remaining_bytes < 0)
				curr_remaining_bytes = 0;
		}
	}

exit:
	return ret;
}

static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
	struct fsl_dspi_dma *dma;
	struct dma_slave_config cfg;
	struct device *dev = &dspi->pdev->dev;
	int ret;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->chan_rx = dma_request_slave_channel(dev, "rx");
	if (!dma->chan_rx) {
		dev_err(dev, "rx dma channel not available\n");
		ret = -ENODEV;
		return ret;
	}

	dma->chan_tx = dma_request_slave_channel(dev, "tx");
	if (!dma->chan_tx) {
		dev_err(dev, "tx dma channel not available\n");
		ret = -ENODEV;
		goto err_tx_channel;
	}

	dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
					     &dma->tx_dma_phys, GFP_KERNEL);
	if (!dma->tx_dma_buf) {
		ret = -ENOMEM;
		goto err_tx_dma_buf;
	}

	dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
					     &dma->rx_dma_phys, GFP_KERNEL);
	if (!dma->rx_dma_buf) {
		ret = -ENOMEM;
		goto err_rx_dma_buf;
	}

	cfg.src_addr = phy_addr + SPI_POPR;
	cfg.dst_addr = phy_addr + SPI_PUSHR;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	cfg.direction = DMA_DEV_TO_MEM;
	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure rx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	cfg.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure tx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	dspi->dma = dma;
	init_completion(&dma->cmd_tx_complete);
	init_completion(&dma->cmd_rx_complete);

	return 0;

err_slave_config:
	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
			  dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
			  dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
	dma_release_channel(dma->chan_tx);
err_tx_channel:
	dma_release_channel(dma->chan_rx);

	devm_kfree(dev, dma);
	dspi->dma = NULL;

	return ret;
}

static void dspi_release_dma(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;

	if (dma) {
		if (dma->chan_tx) {
			/* Buffers come from dma_alloc_coherent(), so
			 * release them with dma_free_coherent().
			 */
			dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
					  dma->tx_dma_buf, dma->tx_dma_phys);
			dma_release_channel(dma->chan_tx);
		}

		if (dma->chan_rx) {
			dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
					  dma->rx_dma_buf, dma->rx_dma_phys);
			dma_release_channel(dma->chan_rx);
		}
	}
}
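
/*
 * SCK = protocol clock / (PBR * BR). hz_to_spi_baud() picks the smallest
 * prescaler/scaler product that still divides enough, so the resulting
 * SCK never exceeds the requested rate. With illustrative numbers: a
 * 66 MHz protocol clock and a 10 MHz request give scale_needed = 7; the
 * closest product is PBR = 2 and BR = 4 (scale 8), i.e. SCK = 8.25 MHz.
 */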
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
			   unsigned long clkrate)
{
	/* Valid baud rate pre-scaler values */
	int pbr_tbl[4] = {2, 3, 5, 7};
	int brs[16] = {	2,	4,	6,	8,
			16,	32,	64,	128,
			256,	512,	1024,	2048,
			4096,	8192,	16384,	32768 };
	int scale_needed, scale, minscale = INT_MAX;
	int i, j;

	scale_needed = clkrate / speed_hz;
	if (clkrate % speed_hz)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(brs); i++)
		for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
			scale = brs[i] * pbr_tbl[j];
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*br = i;
					*pbr = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find valid baud rate, speed_hz is %d, clkrate is %lu, using the max prescaler value.\n",
			speed_hz, clkrate);
		*pbr = ARRAY_SIZE(pbr_tbl) - 1;
		*br = ARRAY_SIZE(brs) - 1;
	}
}

static void ns_delay_scale(char *psc, char *sc, int delay_ns,
			   unsigned long clkrate)
{
	/* Valid delay pre-scaler values */
	int pscale_tbl[4] = {1, 3, 5, 7};
	int scale_needed, scale, minscale = INT_MAX;
	int i, j;
	u32 remainder;

	scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
				   &remainder);
	if (remainder)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
		for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
			scale = pscale_tbl[i] * (2 << j);
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*psc = i;
					*sc = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find correct scale values for %dns delay at clkrate %lu, using the max prescaler value\n",
			delay_ns, clkrate);
		*psc = ARRAY_SIZE(pscale_tbl) - 1;
		*sc = SPI_CTAR_SCALE_BITS;
	}
}

static void fifo_write(struct fsl_dspi *dspi)
{
	regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
}

static void dspi_tcfq_write(struct fsl_dspi *dspi)
{
	/* Clear transfer count */
	dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;

	/* Write one entry to both TX FIFO and CMD FIFO simultaneously */
	fifo_write(dspi);
}

static u32 fifo_read(struct fsl_dspi *dspi)
{
	u32 rxdata = 0;

	regmap_read(dspi->regmap, SPI_POPR, &rxdata);
	return rxdata;
}

static void dspi_tcfq_read(struct fsl_dspi *dspi)
{
	dspi_push_rx(dspi, fifo_read(dspi));
}
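
/*
 * EOQ mode batches work: dspi_eoq_write() queues up to DSPI_FIFO_SIZE
 * command/data words, marking the first one with SPI_PUSHR_CMD_CTCNT (to
 * restart the transfer counter) and the last one with SPI_PUSHR_CMD_EOQ,
 * so a single EOQF interrupt fires when the queue has drained and
 * dspi_eoq_read() can then empty the RX FIFO for the whole batch.
 */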
static void dspi_eoq_write(struct fsl_dspi *dspi)
{
	int fifo_size = DSPI_FIFO_SIZE;

	/* Fill TX FIFO with as many transfers as possible */
	while (dspi->len && fifo_size--) {
		/* Request EOQF for last transfer in FIFO */
		if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
			dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
		/* Clear transfer count for first transfer in FIFO */
		if (fifo_size == (DSPI_FIFO_SIZE - 1))
			dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
		/* Write combined TX FIFO and CMD FIFO entry */
		fifo_write(dspi);
	}
}

static void dspi_eoq_read(struct fsl_dspi *dspi)
{
	int fifo_size = DSPI_FIFO_SIZE;

	/* Read one FIFO entry at a time and push to rx buffer */
	while ((dspi->rx < dspi->rx_end) && fifo_size--)
		dspi_push_rx(dspi, fifo_read(dspi));
}
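
/*
 * Per-message entry point. For each transfer the command word (tx_cmd) is
 * rebuilt, MCR and CTAR0 are reprogrammed, and the transfer is started in
 * the devtype-specific mode; EOQ and TCFQ modes then sleep on waitq until
 * the interrupt handler signals that dspi->len has reached zero.
 */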
static int dspi_transfer_one_message(struct spi_master *master,
				     struct spi_message *message)
{
	struct fsl_dspi *dspi = spi_master_get_devdata(master);
	struct spi_device *spi = message->spi;
	struct spi_transfer *transfer;
	int status = 0;
	enum dspi_trans_mode trans_mode;

	message->actual_length = 0;

	list_for_each_entry(transfer, &message->transfers, transfer_list) {
		dspi->cur_transfer = transfer;
		dspi->cur_msg = message;
		dspi->cur_chip = spi_get_ctldata(spi);
		/* Prepare command word for CMD FIFO */
		dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
			       SPI_PUSHR_CMD_PCS(spi->chip_select);
		if (list_is_last(&dspi->cur_transfer->transfer_list,
				 &dspi->cur_msg->transfers)) {
			/* Leave PCS activated after last transfer when
			 * cs_change is set.
			 */
			if (transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		} else {
			/* Keep PCS active between transfers in same message
			 * when cs_change is not set, and de-activate PCS
			 * between transfers in the same message when
			 * cs_change is set.
			 */
			if (!transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		}

		dspi->void_write_data = dspi->cur_chip->void_write_data;

		dspi->tx = transfer->tx_buf;
		dspi->rx = transfer->rx_buf;
		dspi->rx_end = dspi->rx + transfer->len;
		dspi->len = transfer->len;
		/* Validated transfer specific frame size (defaults applied) */
		dspi->bits_per_word = transfer->bits_per_word;
		if (transfer->bits_per_word <= 8)
			dspi->bytes_per_word = 1;
		else
			dspi->bytes_per_word = 2;

		regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
		regmap_write(dspi->regmap, SPI_CTAR(0),
			     dspi->cur_chip->ctar_val |
			     SPI_FRAME_BITS(transfer->bits_per_word));

		trans_mode = dspi->devtype_data->trans_mode;
		switch (trans_mode) {
		case DSPI_EOQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
			dspi_eoq_write(dspi);
			break;
		case DSPI_TCFQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
			dspi_tcfq_write(dspi);
			break;
		case DSPI_DMA_MODE:
			regmap_write(dspi->regmap, SPI_RSER,
				     SPI_RSER_TFFFE | SPI_RSER_TFFFD |
				     SPI_RSER_RFDFE | SPI_RSER_RFDFD);
			status = dspi_dma_xfer(dspi);
			break;
		default:
			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
				trans_mode);
			status = -EINVAL;
			goto out;
		}

		if (trans_mode != DSPI_DMA_MODE) {
			if (wait_event_interruptible(dspi->waitq,
						     dspi->waitflags))
				dev_err(&dspi->pdev->dev,
					"waiting for transfer to complete failed!\n");
			dspi->waitflags = 0;
		}

		if (transfer->delay_usecs)
			udelay(transfer->delay_usecs);
	}

out:
	message->status = status;
	spi_finalize_current_message(master);

	return status;
}

static int dspi_setup(struct spi_device *spi)
{
	struct chip_data *chip;
	struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
	struct fsl_dspi_platform_data *pdata;
	u32 cs_sck_delay = 0, sck_cs_delay = 0;
	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
	unsigned char pasc = 0, asc = 0;
	unsigned long clkrate;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	pdata = dev_get_platdata(&dspi->pdev->dev);

	if (!pdata) {
		of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
				     &cs_sck_delay);

		of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
				     &sck_cs_delay);
	} else {
		cs_sck_delay = pdata->cs_sck_delay;
		sck_cs_delay = pdata->sck_cs_delay;
	}

	chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
		SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;

	chip->void_write_data = 0;

	clkrate = clk_get_rate(dspi->clk);
	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);

	/* Set PCS to SCK delay scale values */
	ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);

	/* Set After SCK delay scale values */
	ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);

	chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
		| SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
		| SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
		| SPI_CTAR_PCSSCK(pcssck)
		| SPI_CTAR_CSSCK(cssck)
		| SPI_CTAR_PASC(pasc)
		| SPI_CTAR_ASC(asc)
		| SPI_CTAR_PBR(pbr)
		| SPI_CTAR_BR(br);

	spi_set_ctldata(spi, chip);

	return 0;
}

static void dspi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
		spi->master->bus_num, spi->chip_select);

	kfree(chip);
}
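
/*
 * Interrupt handler for the EOQ and TCFQ modes: acknowledge the status
 * flags, use the transfer counter (TCNT) to account the bytes already
 * shifted out, drain the RX FIFO, and either queue the next chunk of the
 * current transfer or wake up dspi_transfer_one_message().
 */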
static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
	struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
	struct spi_message *msg = dspi->cur_msg;
	enum dspi_trans_mode trans_mode;
	u32 spi_sr, spi_tcr;
	u16 spi_tcnt;

	regmap_read(dspi->regmap, SPI_SR, &spi_sr);
	regmap_write(dspi->regmap, SPI_SR, spi_sr);

	if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) {
		/* Get transfer counter (in number of SPI transfers). It was
		 * reset to 0 when transfer(s) were started.
		 */
		regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
		spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
		/* Update total number of bytes that were transferred */
		msg->actual_length += spi_tcnt * dspi->bytes_per_word;

		trans_mode = dspi->devtype_data->trans_mode;
		switch (trans_mode) {
		case DSPI_EOQ_MODE:
			dspi_eoq_read(dspi);
			break;
		case DSPI_TCFQ_MODE:
			dspi_tcfq_read(dspi);
			break;
		default:
			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
				trans_mode);
			return IRQ_HANDLED;
		}

		if (!dspi->len) {
			dspi->waitflags = 1;
			wake_up_interruptible(&dspi->waitq);
		} else {
			switch (trans_mode) {
			case DSPI_EOQ_MODE:
				dspi_eoq_write(dspi);
				break;
			case DSPI_TCFQ_MODE:
				dspi_tcfq_write(dspi);
				break;
			default:
				dev_err(&dspi->pdev->dev,
					"unsupported trans_mode %u\n",
					trans_mode);
			}
		}
	}

	return IRQ_HANDLED;
}

static const struct of_device_id fsl_dspi_dt_ids[] = {
	{ .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
	{ .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
	{ .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);

#ifdef CONFIG_PM_SLEEP
static int dspi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_dspi *dspi = spi_master_get_devdata(master);

	spi_master_suspend(master);
	clk_disable_unprepare(dspi->clk);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int dspi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_dspi *dspi = spi_master_get_devdata(master);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		return ret;
	spi_master_resume(master);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);

static const struct regmap_config dspi_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x88,
};

static void dspi_init(struct fsl_dspi *dspi)
{
	regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
}

static int dspi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct fsl_dspi *dspi;
	struct resource *res;
	void __iomem *base;
	struct fsl_dspi_platform_data *pdata;
	int ret = 0, cs_num, bus_num;

	master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
	if (!master)
		return -ENOMEM;

	dspi = spi_master_get_devdata(master);
	dspi->pdev = pdev;
	dspi->master = master;

	master->transfer = NULL;
	master->setup = dspi_setup;
	master->transfer_one_message = dspi_transfer_one_message;
	master->dev.of_node = pdev->dev.of_node;

	master->cleanup = dspi_cleanup;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		master->num_chipselect = pdata->cs_num;
		master->bus_num = pdata->bus_num;

		dspi->devtype_data = &coldfire_data;
	} else {
		ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
			goto out_master_put;
		}
		master->num_chipselect = cs_num;

		ret = of_property_read_u32(np, "bus-num", &bus_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get bus-num\n");
			goto out_master_put;
		}
		master->bus_num = bus_num;

		dspi->devtype_data = of_device_get_match_data(&pdev->dev);
		if (!dspi->devtype_data) {
			dev_err(&pdev->dev, "can't get devtype_data\n");
			ret = -EFAULT;
			goto out_master_put;
		}
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out_master_put;
	}

	dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
						 &dspi_regmap_config);
	if (IS_ERR(dspi->regmap)) {
		dev_err(&pdev->dev, "failed to init regmap: %ld\n",
			PTR_ERR(dspi->regmap));
		ret = PTR_ERR(dspi->regmap);
		goto out_master_put;
	}

	dspi_init(dspi);
	dspi->irq = platform_get_irq(pdev, 0);
	if (dspi->irq < 0) {
		dev_err(&pdev->dev, "can't get platform irq\n");
		ret = dspi->irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
			       pdev->name, dspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
		goto out_master_put;
	}

	dspi->clk = devm_clk_get(&pdev->dev, "dspi");
	if (IS_ERR(dspi->clk)) {
		ret = PTR_ERR(dspi->clk);
		dev_err(&pdev->dev, "unable to get clock\n");
		goto out_master_put;
	}
	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		goto out_master_put;

	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		ret = dspi_request_dma(dspi, res->start);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get dma channels\n");
			goto out_clk_put;
		}
	}

	master->max_speed_hz =
		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;

	init_waitqueue_head(&dspi->waitq);
	platform_set_drvdata(pdev, master);

	ret = spi_register_master(master);
	if (ret != 0) {
		dev_err(&pdev->dev, "Problem registering DSPI master\n");
		goto out_clk_put;
	}

	return ret;

out_clk_put:
	clk_disable_unprepare(dspi->clk);
out_master_put:
	spi_master_put(master);

	return ret;
}

static int dspi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct fsl_dspi *dspi = spi_master_get_devdata(master);

	/* Disconnect from the SPI framework */
	dspi_release_dma(dspi);
	clk_disable_unprepare(dspi->clk);
	spi_unregister_master(dspi->master);

	return 0;
}

static struct platform_driver fsl_dspi_driver = {
	.driver.name		= DRIVER_NAME,
	.driver.of_match_table	= fsl_dspi_dt_ids,
	.driver.owner		= THIS_MODULE,
	.driver.pm		= &dspi_pm,
	.probe			= dspi_probe,
	.remove			= dspi_remove,
};
module_platform_driver(fsl_dspi_driver);

MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);