spi-fsl-dspi.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149
  1. // SPDX-License-Identifier: GPL-2.0+
  2. //
  3. // Copyright 2013 Freescale Semiconductor, Inc.
  4. //
  5. // Freescale DSPI driver
  6. // This file contains a driver for the Freescale DSPI
  7. #include <linux/clk.h>
  8. #include <linux/delay.h>
  9. #include <linux/dmaengine.h>
  10. #include <linux/dma-mapping.h>
  11. #include <linux/err.h>
  12. #include <linux/errno.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/io.h>
  15. #include <linux/kernel.h>
  16. #include <linux/math64.h>
  17. #include <linux/module.h>
  18. #include <linux/of.h>
  19. #include <linux/of_device.h>
  20. #include <linux/pinctrl/consumer.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/pm_runtime.h>
  23. #include <linux/regmap.h>
  24. #include <linux/sched.h>
  25. #include <linux/spi/spi.h>
  26. #include <linux/spi/spi-fsl-dspi.h>
  27. #include <linux/spi/spi_bitbang.h>
  28. #include <linux/time.h>
/* ---------------- Driver-wide constants ---------------- */
#define DRIVER_NAME "fsl-dspi"

/* TX/RX FIFO depth, in entries (one SPI frame per entry) */
#define DSPI_FIFO_SIZE 4
/* Size of each DMA bounce buffer, in bytes */
#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)

/* ---------------- Module Configuration Register (MCR) ---------------- */
#define SPI_MCR 0x00
#define SPI_MCR_MASTER (1 << 31)
#define SPI_MCR_PCSIS (0x3F << 16)
#define SPI_MCR_CLR_TXF (1 << 11)
#define SPI_MCR_CLR_RXF (1 << 10)
#define SPI_MCR_XSPI (1 << 3)

/* ---------------- Transfer Count Register (TCR) ---------------- */
#define SPI_TCR 0x08
#define SPI_TCR_GET_TCNT(x) (((x) & 0xffff0000) >> 16)

/* ------- Clock and Transfer Attributes Registers (CTARn) ------- */
#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4))
#define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
#define SPI_CTAR_CPOL(x) ((x) << 26)
#define SPI_CTAR_CPHA(x) ((x) << 25)
#define SPI_CTAR_LSBFE(x) ((x) << 24)
#define SPI_CTAR_PCSSCK(x) (((x) & 0x00000003) << 22)
#define SPI_CTAR_PASC(x) (((x) & 0x00000003) << 20)
#define SPI_CTAR_PDT(x) (((x) & 0x00000003) << 18)
#define SPI_CTAR_PBR(x) (((x) & 0x00000003) << 16)
#define SPI_CTAR_CSSCK(x) (((x) & 0x0000000f) << 12)
#define SPI_CTAR_ASC(x) (((x) & 0x0000000f) << 8)
#define SPI_CTAR_DT(x) (((x) & 0x0000000f) << 4)
#define SPI_CTAR_BR(x) ((x) & 0x0000000f)
#define SPI_CTAR_SCALE_BITS 0xf
#define SPI_CTAR0_SLAVE 0x0c

/* ---------------- Status Register (SR) ---------------- */
#define SPI_SR 0x2c
#define SPI_SR_EOQF 0x10000000
#define SPI_SR_TCFQF 0x80000000
/* Write-1-to-clear value covering all SR event flags */
#define SPI_SR_CLEAR 0xdaad0000

/* DMA request enable/select bits (live in RSER) */
#define SPI_RSER_TFFFE BIT(25)
#define SPI_RSER_TFFFD BIT(24)
#define SPI_RSER_RFDFE BIT(17)
#define SPI_RSER_RFDFD BIT(16)

/* ----- DMA/Interrupt Request Select and Enable Register (RSER) ----- */
#define SPI_RSER 0x30
#define SPI_RSER_EOQFE 0x10000000
#define SPI_RSER_TCFQE 0x80000000

/* ---------------- PUSH TX FIFO Register (PUSHR) ---------------- */
/* The *_CMD_* variants are the 16-bit command half on its own; the
 * unsuffixed variants are the same bits positioned in the full 32-bit
 * PUSHR word (command half in the upper 16 bits). */
#define SPI_PUSHR 0x34
#define SPI_PUSHR_CMD_CONT (1 << 15)
#define SPI_PUSHR_CONT (SPI_PUSHR_CMD_CONT << 16)
#define SPI_PUSHR_CMD_CTAS(x) (((x) & 0x0003) << 12)
#define SPI_PUSHR_CTAS(x) (SPI_PUSHR_CMD_CTAS(x) << 16)
#define SPI_PUSHR_CMD_EOQ (1 << 11)
#define SPI_PUSHR_EOQ (SPI_PUSHR_CMD_EOQ << 16)
#define SPI_PUSHR_CMD_CTCNT (1 << 10)
#define SPI_PUSHR_CTCNT (SPI_PUSHR_CMD_CTCNT << 16)
#define SPI_PUSHR_CMD_PCS(x) ((1 << x) & 0x003f)
#define SPI_PUSHR_PCS(x) (SPI_PUSHR_CMD_PCS(x) << 16)
#define SPI_PUSHR_TXDATA(x) ((x) & 0x0000ffff)
#define SPI_PUSHR_SLAVE 0x34

/* ---------------- POP RX FIFO Register (POPR) ---------------- */
#define SPI_POPR 0x38
#define SPI_POPR_RXDATA(x) ((x) & 0x0000ffff)

/* TX/RX FIFO debug windows */
#define SPI_TXFR0 0x3c
#define SPI_TXFR1 0x40
#define SPI_TXFR2 0x44
#define SPI_TXFR3 0x48
#define SPI_RXFR0 0x7c
#define SPI_RXFR1 0x80
#define SPI_RXFR2 0x84
#define SPI_RXFR3 0x88

/* ---------------- Extended (XSPI) registers ---------------- */
#define SPI_CTARE(x) (0x11c + (((x) & 0x3) * 4))
#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)
#define SPI_SREX 0x13c

/* Frame-size helpers: FMSZ holds (bits - 1); FMSZE carries bit 4 of
 * (bits - 1) for frames wider than 16 bits (XSPI only). */
#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_BITS_MASK SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_16 SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_8 SPI_CTAR_FMSZ(0x7)
#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)
#define SPI_FRAME_EBITS_MASK SPI_CTARE_FMSZE(1)

/* Register offsets for regmap_pushr */
#define PUSHR_CMD 0x0
#define PUSHR_TX 0x2

#define SPI_CS_INIT 0x01
#define SPI_CS_ASSERT 0x02
#define SPI_CS_DROP 0x04

/* Upper bound for one DMA round-trip before declaring a hang */
#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
/* Per-chip-select state, cached in the spi_device controller data */
struct chip_data {
	u32 ctar_val;		/* Precomputed CTAR value (mode, delays, baud) */
	u16 void_write_data;	/* Frame sent when there is no tx buffer —
				 * always set to 0 in dspi_setup(); actual use
				 * is not visible in this chunk */
};
/* Transfer strategies, selected per SoC via fsl_dspi_devtype_data */
enum dspi_trans_mode {
	DSPI_EOQ_MODE = 0,	/* Fill FIFO, interrupt on End-Of-Queue flag */
	DSPI_TCFQ_MODE,		/* One frame at a time, interrupt on TCFQ flag */
	DSPI_DMA_MODE,		/* Bounce-buffer DMA to/from PUSHR/POPR */
};
/* Per-compatible capabilities and quirks */
struct fsl_dspi_devtype_data {
	enum dspi_trans_mode trans_mode;	/* How transfers are driven */
	u8 max_clock_factor;	/* Clock divider limit — consumed outside this
				 * chunk (presumably in probe); TODO confirm */
	bool xspi_mode;		/* Extended mode: frames wider than 16 bits */
};
/* Vybrid VF610: DMA-driven transfers */
static const struct fsl_dspi_devtype_data vf610_data = {
	.trans_mode = DSPI_DMA_MODE,
	.max_clock_factor = 2,
};

/* LS1021A v1.0: TCFQ transfers, supports XSPI (>16-bit frames) */
static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
	.xspi_mode = true,
};

/* LS2085A: TCFQ transfers, no XSPI */
static const struct fsl_dspi_devtype_data ls2085a_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
};

/* ColdFire (platform-data probe path): simple EOQ transfers */
static const struct fsl_dspi_devtype_data coldfire_data = {
	.trans_mode = DSPI_EOQ_MODE,
	.max_clock_factor = 8,
};
/* DMA channels, bounce buffers and completions for DSPI_DMA_MODE */
struct fsl_dspi_dma {
	/* Length of transfer in words of DSPI_FIFO_SIZE */
	/* NOTE(review): per dspi_next_xfer_dma_submit() this is actually a
	 * count of 32-bit PUSHR entries, not "words of DSPI_FIFO_SIZE" —
	 * the original comment looks stale; confirm. */
	u32 curr_xfer_len;

	u32 *tx_dma_buf;		/* CPU view of the TX bounce buffer */
	struct dma_chan *chan_tx;
	dma_addr_t tx_dma_phys;		/* Device view of the same buffer */
	struct completion cmd_tx_complete;
	struct dma_async_tx_descriptor *tx_desc;

	u32 *rx_dma_buf;		/* CPU view of the RX bounce buffer */
	struct dma_chan *chan_rx;
	dma_addr_t rx_dma_phys;		/* Device view of the same buffer */
	struct completion cmd_rx_complete;
	struct dma_async_tx_descriptor *rx_desc;
};
/* Driver-instance state, stored as the spi_master's drvdata */
struct fsl_dspi {
	struct spi_master *master;
	struct platform_device *pdev;

	struct regmap *regmap;		/* Main 32-bit register map */
	struct regmap *regmap_pushr;	/* 16-bit view of PUSHR (XSPI path) */
	int irq;
	struct clk *clk;		/* Module clock */

	/* State of the transfer currently in flight */
	struct spi_transfer *cur_transfer;
	struct spi_message *cur_msg;
	struct chip_data *cur_chip;
	size_t len;			/* Bytes still to transmit */
	const void *tx;			/* Next tx byte, or NULL (rx-only) */
	void *rx;			/* Next rx byte, or NULL (tx-only) */
	void *rx_end;			/* One past the end of the rx buffer */
	u16 void_write_data;		/* Copied from cur_chip each transfer */
	u16 tx_cmd;			/* CMD half of the next PUSHR entry */
	u8 bits_per_word;		/* Frame size of current transfer */
	u8 bytes_per_word;		/* 1, 2 or 4 — memory stride per frame */

	const struct fsl_dspi_devtype_data *devtype_data;

	wait_queue_head_t waitq;	/* IRQ -> transfer-thread handshake */
	u32 waitflags;			/* Set to 1 by IRQ on completion */

	struct fsl_dspi_dma *dma;	/* DMA state; NULL when not set up */
};
  174. static u32 dspi_pop_tx(struct fsl_dspi *dspi)
  175. {
  176. u32 txdata = 0;
  177. if (dspi->tx) {
  178. if (dspi->bytes_per_word == 1)
  179. txdata = *(u8 *)dspi->tx;
  180. else if (dspi->bytes_per_word == 2)
  181. txdata = *(u16 *)dspi->tx;
  182. else /* dspi->bytes_per_word == 4 */
  183. txdata = *(u32 *)dspi->tx;
  184. dspi->tx += dspi->bytes_per_word;
  185. }
  186. dspi->len -= dspi->bytes_per_word;
  187. return txdata;
  188. }
  189. static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
  190. {
  191. u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
  192. if (dspi->len > 0)
  193. cmd |= SPI_PUSHR_CMD_CONT;
  194. return cmd << 16 | data;
  195. }
  196. static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
  197. {
  198. if (!dspi->rx)
  199. return;
  200. /* Mask of undefined bits */
  201. rxdata &= (1 << dspi->bits_per_word) - 1;
  202. if (dspi->bytes_per_word == 1)
  203. *(u8 *)dspi->rx = rxdata;
  204. else if (dspi->bytes_per_word == 2)
  205. *(u16 *)dspi->rx = rxdata;
  206. else /* dspi->bytes_per_word == 4 */
  207. *(u32 *)dspi->rx = rxdata;
  208. dspi->rx += dspi->bytes_per_word;
  209. }
  210. static void dspi_tx_dma_callback(void *arg)
  211. {
  212. struct fsl_dspi *dspi = arg;
  213. struct fsl_dspi_dma *dma = dspi->dma;
  214. complete(&dma->cmd_tx_complete);
  215. }
  216. static void dspi_rx_dma_callback(void *arg)
  217. {
  218. struct fsl_dspi *dspi = arg;
  219. struct fsl_dspi_dma *dma = dspi->dma;
  220. int i;
  221. if (dspi->rx) {
  222. for (i = 0; i < dma->curr_xfer_len; i++)
  223. dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
  224. }
  225. complete(&dma->cmd_rx_complete);
  226. }
/*
 * Run one DMA round: fill the TX bounce buffer with curr_xfer_len PUSHR
 * entries, submit one TX and one RX slave transaction, kick both channels
 * and wait for both completions.
 *
 * Returns 0 on success, -EIO when a descriptor cannot be obtained,
 * -EINVAL on submit failure, or -ETIMEDOUT when either direction does not
 * finish within DMA_COMPLETION_TIMEOUT (both channels are terminated in
 * that case).
 */
static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;
	int time_left;
	int i;

	/* Each bounce-buffer word is a complete CMD+data PUSHR entry */
	for (i = 0; i < dma->curr_xfer_len; i++)
		dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);

	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
					dma->tx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->tx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->tx_desc->callback = dspi_tx_dma_callback;
	dma->tx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
					dma->rx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->rx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->rx_desc->callback = dspi_rx_dma_callback;
	dma->rx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	/* Re-arm both completions before starting the channels */
	reinit_completion(&dspi->dma->cmd_rx_complete);
	reinit_completion(&dspi->dma->cmd_tx_complete);

	/* RX first so it is ready before TX starts pushing frames */
	dma_async_issue_pending(dma->chan_rx);
	dma_async_issue_pending(dma->chan_tx);

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA tx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA rx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	return 0;
}
  289. static int dspi_dma_xfer(struct fsl_dspi *dspi)
  290. {
  291. struct fsl_dspi_dma *dma = dspi->dma;
  292. struct device *dev = &dspi->pdev->dev;
  293. struct spi_message *message = dspi->cur_msg;
  294. int curr_remaining_bytes;
  295. int bytes_per_buffer;
  296. int ret = 0;
  297. curr_remaining_bytes = dspi->len;
  298. bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
  299. while (curr_remaining_bytes) {
  300. /* Check if current transfer fits the DMA buffer */
  301. dma->curr_xfer_len = curr_remaining_bytes
  302. / dspi->bytes_per_word;
  303. if (dma->curr_xfer_len > bytes_per_buffer)
  304. dma->curr_xfer_len = bytes_per_buffer;
  305. ret = dspi_next_xfer_dma_submit(dspi);
  306. if (ret) {
  307. dev_err(dev, "DMA transfer failed\n");
  308. goto exit;
  309. } else {
  310. const int len =
  311. dma->curr_xfer_len * dspi->bytes_per_word;
  312. curr_remaining_bytes -= len;
  313. message->actual_length += len;
  314. if (curr_remaining_bytes < 0)
  315. curr_remaining_bytes = 0;
  316. }
  317. }
  318. exit:
  319. return ret;
  320. }
  321. static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
  322. {
  323. struct fsl_dspi_dma *dma;
  324. struct dma_slave_config cfg;
  325. struct device *dev = &dspi->pdev->dev;
  326. int ret;
  327. dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
  328. if (!dma)
  329. return -ENOMEM;
  330. dma->chan_rx = dma_request_slave_channel(dev, "rx");
  331. if (!dma->chan_rx) {
  332. dev_err(dev, "rx dma channel not available\n");
  333. ret = -ENODEV;
  334. return ret;
  335. }
  336. dma->chan_tx = dma_request_slave_channel(dev, "tx");
  337. if (!dma->chan_tx) {
  338. dev_err(dev, "tx dma channel not available\n");
  339. ret = -ENODEV;
  340. goto err_tx_channel;
  341. }
  342. dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
  343. &dma->tx_dma_phys, GFP_KERNEL);
  344. if (!dma->tx_dma_buf) {
  345. ret = -ENOMEM;
  346. goto err_tx_dma_buf;
  347. }
  348. dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
  349. &dma->rx_dma_phys, GFP_KERNEL);
  350. if (!dma->rx_dma_buf) {
  351. ret = -ENOMEM;
  352. goto err_rx_dma_buf;
  353. }
  354. cfg.src_addr = phy_addr + SPI_POPR;
  355. cfg.dst_addr = phy_addr + SPI_PUSHR;
  356. cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  357. cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  358. cfg.src_maxburst = 1;
  359. cfg.dst_maxburst = 1;
  360. cfg.direction = DMA_DEV_TO_MEM;
  361. ret = dmaengine_slave_config(dma->chan_rx, &cfg);
  362. if (ret) {
  363. dev_err(dev, "can't configure rx dma channel\n");
  364. ret = -EINVAL;
  365. goto err_slave_config;
  366. }
  367. cfg.direction = DMA_MEM_TO_DEV;
  368. ret = dmaengine_slave_config(dma->chan_tx, &cfg);
  369. if (ret) {
  370. dev_err(dev, "can't configure tx dma channel\n");
  371. ret = -EINVAL;
  372. goto err_slave_config;
  373. }
  374. dspi->dma = dma;
  375. init_completion(&dma->cmd_tx_complete);
  376. init_completion(&dma->cmd_rx_complete);
  377. return 0;
  378. err_slave_config:
  379. dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
  380. dma->rx_dma_buf, dma->rx_dma_phys);
  381. err_rx_dma_buf:
  382. dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
  383. dma->tx_dma_buf, dma->tx_dma_phys);
  384. err_tx_dma_buf:
  385. dma_release_channel(dma->chan_tx);
  386. err_tx_channel:
  387. dma_release_channel(dma->chan_rx);
  388. devm_kfree(dev, dma);
  389. dspi->dma = NULL;
  390. return ret;
  391. }
  392. static void dspi_release_dma(struct fsl_dspi *dspi)
  393. {
  394. struct fsl_dspi_dma *dma = dspi->dma;
  395. struct device *dev = &dspi->pdev->dev;
  396. if (dma) {
  397. if (dma->chan_tx) {
  398. dma_unmap_single(dev, dma->tx_dma_phys,
  399. DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
  400. dma_release_channel(dma->chan_tx);
  401. }
  402. if (dma->chan_rx) {
  403. dma_unmap_single(dev, dma->rx_dma_phys,
  404. DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
  405. dma_release_channel(dma->chan_rx);
  406. }
  407. }
  408. }
  409. static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
  410. unsigned long clkrate)
  411. {
  412. /* Valid baud rate pre-scaler values */
  413. int pbr_tbl[4] = {2, 3, 5, 7};
  414. int brs[16] = { 2, 4, 6, 8,
  415. 16, 32, 64, 128,
  416. 256, 512, 1024, 2048,
  417. 4096, 8192, 16384, 32768 };
  418. int scale_needed, scale, minscale = INT_MAX;
  419. int i, j;
  420. scale_needed = clkrate / speed_hz;
  421. if (clkrate % speed_hz)
  422. scale_needed++;
  423. for (i = 0; i < ARRAY_SIZE(brs); i++)
  424. for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
  425. scale = brs[i] * pbr_tbl[j];
  426. if (scale >= scale_needed) {
  427. if (scale < minscale) {
  428. minscale = scale;
  429. *br = i;
  430. *pbr = j;
  431. }
  432. break;
  433. }
  434. }
  435. if (minscale == INT_MAX) {
  436. pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld, we use the max prescaler value.\n",
  437. speed_hz, clkrate);
  438. *pbr = ARRAY_SIZE(pbr_tbl) - 1;
  439. *br = ARRAY_SIZE(brs) - 1;
  440. }
  441. }
  442. static void ns_delay_scale(char *psc, char *sc, int delay_ns,
  443. unsigned long clkrate)
  444. {
  445. int pscale_tbl[4] = {1, 3, 5, 7};
  446. int scale_needed, scale, minscale = INT_MAX;
  447. int i, j;
  448. u32 remainder;
  449. scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
  450. &remainder);
  451. if (remainder)
  452. scale_needed++;
  453. for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
  454. for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
  455. scale = pscale_tbl[i] * (2 << j);
  456. if (scale >= scale_needed) {
  457. if (scale < minscale) {
  458. minscale = scale;
  459. *psc = i;
  460. *sc = j;
  461. }
  462. break;
  463. }
  464. }
  465. if (minscale == INT_MAX) {
  466. pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
  467. delay_ns, clkrate);
  468. *psc = ARRAY_SIZE(pscale_tbl) - 1;
  469. *sc = SPI_CTAR_SCALE_BITS;
  470. }
  471. }
  472. static void fifo_write(struct fsl_dspi *dspi)
  473. {
  474. regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
  475. }
  476. static void cmd_fifo_write(struct fsl_dspi *dspi)
  477. {
  478. u16 cmd = dspi->tx_cmd;
  479. if (dspi->len > 0)
  480. cmd |= SPI_PUSHR_CMD_CONT;
  481. regmap_write(dspi->regmap_pushr, PUSHR_CMD, cmd);
  482. }
  483. static void tx_fifo_write(struct fsl_dspi *dspi, u16 txdata)
  484. {
  485. regmap_write(dspi->regmap_pushr, PUSHR_TX, txdata);
  486. }
  487. static void dspi_tcfq_write(struct fsl_dspi *dspi)
  488. {
  489. /* Clear transfer count */
  490. dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
  491. if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
  492. /* Write two TX FIFO entries first, and then the corresponding
  493. * CMD FIFO entry.
  494. */
  495. u32 data = dspi_pop_tx(dspi);
  496. if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE(1)) {
  497. /* LSB */
  498. tx_fifo_write(dspi, data & 0xFFFF);
  499. tx_fifo_write(dspi, data >> 16);
  500. } else {
  501. /* MSB */
  502. tx_fifo_write(dspi, data >> 16);
  503. tx_fifo_write(dspi, data & 0xFFFF);
  504. }
  505. cmd_fifo_write(dspi);
  506. } else {
  507. /* Write one entry to both TX FIFO and CMD FIFO
  508. * simultaneously.
  509. */
  510. fifo_write(dspi);
  511. }
  512. }
  513. static u32 fifo_read(struct fsl_dspi *dspi)
  514. {
  515. u32 rxdata = 0;
  516. regmap_read(dspi->regmap, SPI_POPR, &rxdata);
  517. return rxdata;
  518. }
  519. static void dspi_tcfq_read(struct fsl_dspi *dspi)
  520. {
  521. dspi_push_rx(dspi, fifo_read(dspi));
  522. }
/*
 * EOQ mode: fill the TX FIFO with up to DSPI_FIFO_SIZE combined CMD+data
 * entries, marking the first entry with CTCNT (reset transfer counter)
 * and the last with EOQ (raise EOQF when it completes).
 *
 * NOTE(review): the CTCNT/EOQ bits are OR-ed into dspi->tx_cmd and never
 * cleared here, so after the first refill they remain set for every
 * subsequent entry of the transfer — confirm this is intended.
 */
static void dspi_eoq_write(struct fsl_dspi *dspi)
{
	int fifo_size = DSPI_FIFO_SIZE;

	/* Fill TX FIFO with as many transfers as possible */
	while (dspi->len && fifo_size--) {
		/* Request EOQF for last transfer in FIFO */
		if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
			dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
		/* Clear transfer count for first transfer in FIFO */
		if (fifo_size == (DSPI_FIFO_SIZE - 1))
			dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
		/* Write combined TX FIFO and CMD FIFO entry */
		fifo_write(dspi);
	}
}
  538. static void dspi_eoq_read(struct fsl_dspi *dspi)
  539. {
  540. int fifo_size = DSPI_FIFO_SIZE;
  541. /* Read one FIFO entry at and push to rx buffer */
  542. while ((dspi->rx < dspi->rx_end) && fifo_size--)
  543. dspi_push_rx(dspi, fifo_read(dspi));
  544. }
/*
 * Execute a complete spi_message: for each transfer, program CTAR with
 * the cached per-chip value plus the frame size, select the SoC's
 * transfer mode, start it, and wait for completion (IRQ wakeup for
 * EOQ/TCFQ; synchronous for DMA). Always finalizes the message and
 * returns 0 or a negative error code.
 */
static int dspi_transfer_one_message(struct spi_master *master,
				     struct spi_message *message)
{
	struct fsl_dspi *dspi = spi_master_get_devdata(master);
	struct spi_device *spi = message->spi;
	struct spi_transfer *transfer;
	int status = 0;
	enum dspi_trans_mode trans_mode;

	message->actual_length = 0;

	list_for_each_entry(transfer, &message->transfers, transfer_list) {
		dspi->cur_transfer = transfer;
		dspi->cur_msg = message;
		dspi->cur_chip = spi_get_ctldata(spi);
		/* Prepare command word for CMD FIFO */
		dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
			       SPI_PUSHR_CMD_PCS(spi->chip_select);
		if (list_is_last(&dspi->cur_transfer->transfer_list,
				 &dspi->cur_msg->transfers)) {
			/* Leave PCS activated after last transfer when
			 * cs_change is set.
			 */
			if (transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		} else {
			/* Keep PCS active between transfers in same message
			 * when cs_change is not set, and de-activate PCS
			 * between transfers in the same message when
			 * cs_change is set.
			 */
			if (!transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		}

		dspi->void_write_data = dspi->cur_chip->void_write_data;

		dspi->tx = transfer->tx_buf;
		dspi->rx = transfer->rx_buf;
		dspi->rx_end = dspi->rx + transfer->len;
		dspi->len = transfer->len;
		/* Validated transfer specific frame size (defaults applied) */
		dspi->bits_per_word = transfer->bits_per_word;
		if (transfer->bits_per_word <= 8)
			dspi->bytes_per_word = 1;
		else if (transfer->bits_per_word <= 16)
			dspi->bytes_per_word = 2;
		else
			dspi->bytes_per_word = 4;

		/* Flush both FIFOs before reprogramming the frame format */
		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
		regmap_write(dspi->regmap, SPI_CTAR(0),
			     dspi->cur_chip->ctar_val |
			     SPI_FRAME_BITS(transfer->bits_per_word));
		/* XSPI parts carry the extra frame-size bit in CTARE */
		if (dspi->devtype_data->xspi_mode)
			regmap_write(dspi->regmap, SPI_CTARE(0),
				     SPI_FRAME_EBITS(transfer->bits_per_word)
				     | SPI_CTARE_DTCP(1));

		trans_mode = dspi->devtype_data->trans_mode;
		switch (trans_mode) {
		case DSPI_EOQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
			dspi_eoq_write(dspi);
			break;
		case DSPI_TCFQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
			dspi_tcfq_write(dspi);
			break;
		case DSPI_DMA_MODE:
			/* Route FIFO fill/drain requests to the DMA engine */
			regmap_write(dspi->regmap, SPI_RSER,
				     SPI_RSER_TFFFE | SPI_RSER_TFFFD |
				     SPI_RSER_RFDFE | SPI_RSER_RFDFD);
			status = dspi_dma_xfer(dspi);
			break;
		default:
			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
				trans_mode);
			status = -EINVAL;
			goto out;
		}

		if (trans_mode != DSPI_DMA_MODE) {
			/* Sleep until dspi_interrupt() reports completion */
			if (wait_event_interruptible(dspi->waitq,
						     dspi->waitflags))
				dev_err(&dspi->pdev->dev,
					"wait transfer complete fail!\n");
			dspi->waitflags = 0;
		}

		if (transfer->delay_usecs)
			udelay(transfer->delay_usecs);
	}

out:
	message->status = status;
	spi_finalize_current_message(master);

	return status;
}
/*
 * SPI-core setup callback: translate spi->mode, max_speed_hz and the CS
 * timing properties (platform data or device tree) into a cached CTAR
 * value stored in the per-device chip_data. May be called repeatedly for
 * the same device; the chip_data is allocated only once.
 *
 * Returns 0 on success or -ENOMEM if the chip_data allocation fails.
 */
static int dspi_setup(struct spi_device *spi)
{
	struct chip_data *chip;
	struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
	struct fsl_dspi_platform_data *pdata;
	u32 cs_sck_delay = 0, sck_cs_delay = 0;
	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
	unsigned char pasc = 0, asc = 0;
	unsigned long clkrate;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	/* CS timing comes from platform data, else the DT node (the DT
	 * reads leave the defaults of 0 in place when absent). */
	pdata = dev_get_platdata(&dspi->pdev->dev);
	if (!pdata) {
		of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
				     &cs_sck_delay);
		of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
				     &sck_cs_delay);
	} else {
		cs_sck_delay = pdata->cs_sck_delay;
		sck_cs_delay = pdata->sck_cs_delay;
	}

	chip->void_write_data = 0;

	clkrate = clk_get_rate(dspi->clk);
	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);

	/* Set PCS to SCK delay scale values */
	ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);

	/* Set After SCK delay scale values */
	ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);

	chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
		| SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
		| SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
		| SPI_CTAR_PCSSCK(pcssck)
		| SPI_CTAR_CSSCK(cssck)
		| SPI_CTAR_PASC(pasc)
		| SPI_CTAR_ASC(asc)
		| SPI_CTAR_PBR(pbr)
		| SPI_CTAR_BR(br);

	spi_set_ctldata(spi, chip);

	return 0;
}
  682. static void dspi_cleanup(struct spi_device *spi)
  683. {
  684. struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
  685. dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
  686. spi->master->bus_num, spi->chip_select);
  687. kfree(chip);
  688. }
/*
 * IRQ handler for EOQ/TCFQ modes: acknowledge all pending status flags,
 * account for the frames the hardware completed, drain the RX side, and
 * either queue the next chunk or wake the thread sleeping in
 * dspi_transfer_one_message() when the transfer is exhausted.
 */
static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
	struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
	struct spi_message *msg = dspi->cur_msg;
	enum dspi_trans_mode trans_mode;
	u32 spi_sr, spi_tcr;
	u16 spi_tcnt;

	/* SR flags are write-1-to-clear: ack exactly what we saw */
	regmap_read(dspi->regmap, SPI_SR, &spi_sr);
	regmap_write(dspi->regmap, SPI_SR, spi_sr);

	if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) {
		/* Get transfer counter (in number of SPI transfers). It was
		 * reset to 0 when transfer(s) were started.
		 */
		regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
		spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
		/* Update total number of bytes that were transferred */
		msg->actual_length += spi_tcnt * dspi->bytes_per_word;

		trans_mode = dspi->devtype_data->trans_mode;
		switch (trans_mode) {
		case DSPI_EOQ_MODE:
			dspi_eoq_read(dspi);
			break;
		case DSPI_TCFQ_MODE:
			dspi_tcfq_read(dspi);
			break;
		default:
			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
				trans_mode);
			return IRQ_HANDLED;
		}

		if (!dspi->len) {
			/* Transfer fully consumed: wake the waiting thread */
			dspi->waitflags = 1;
			wake_up_interruptible(&dspi->waitq);
		} else {
			/* More data pending: queue the next chunk */
			switch (trans_mode) {
			case DSPI_EOQ_MODE:
				dspi_eoq_write(dspi);
				break;
			case DSPI_TCFQ_MODE:
				dspi_tcfq_write(dspi);
				break;
			default:
				dev_err(&dspi->pdev->dev,
					"unsupported trans_mode %u\n",
					trans_mode);
			}
		}
	}

	return IRQ_HANDLED;
}
/* OF match table; .data points at the per-SoC devtype description */
static const struct of_device_id fsl_dspi_dt_ids[] = {
	{ .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
	{ .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
	{ .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
  746. #ifdef CONFIG_PM_SLEEP
  747. static int dspi_suspend(struct device *dev)
  748. {
  749. struct spi_master *master = dev_get_drvdata(dev);
  750. struct fsl_dspi *dspi = spi_master_get_devdata(master);
  751. spi_master_suspend(master);
  752. clk_disable_unprepare(dspi->clk);
  753. pinctrl_pm_select_sleep_state(dev);
  754. return 0;
  755. }
  756. static int dspi_resume(struct device *dev)
  757. {
  758. struct spi_master *master = dev_get_drvdata(dev);
  759. struct fsl_dspi *dspi = spi_master_get_devdata(master);
  760. int ret;
  761. pinctrl_pm_select_default_state(dev);
  762. ret = clk_prepare_enable(dspi->clk);
  763. if (ret)
  764. return ret;
  765. spi_master_resume(master);
  766. return 0;
  767. }
  768. #endif /* CONFIG_PM_SLEEP */
  769. static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
/* Registers whose values regmap must never cache (hardware-volatile) */
static const struct regmap_range dspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
};

static const struct regmap_access_table dspi_volatile_table = {
	.yes_ranges = dspi_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
};

/* Register map for non-XSPI controllers (space ends at RXFR3, 0x88) */
static const struct regmap_config dspi_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x88,
	.volatile_table = &dspi_volatile_table,
};
/* XSPI variant: same volatile set plus the extended status register */
static const struct regmap_range dspi_xspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_xspi_volatile_table = {
	.yes_ranges = dspi_xspi_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
};

/*
 * XSPI controllers use two maps: [0] covers the full 32-bit register
 * space up to SREX (0x13c); [1] is a 16-bit "pushr" window that lets the
 * CMD and TX halves of PUSHR be written independently (see
 * cmd_fifo_write()/tx_fifo_write()).
 */
static const struct regmap_config dspi_xspi_regmap_config[] = {
	{
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.max_register = 0x13c,
		.volatile_table = &dspi_xspi_volatile_table,
	},
	{
		.name = "pushr",
		.reg_bits = 16,
		.val_bits = 16,
		.reg_stride = 2,
		.max_register = 0x2,
	},
};
  812. static void dspi_init(struct fsl_dspi *dspi)
  813. {
  814. regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS |
  815. (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0));
  816. regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
  817. if (dspi->devtype_data->xspi_mode)
  818. regmap_write(dspi->regmap, SPI_CTARE(0),
  819. SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1));
  820. }
  821. static int dspi_probe(struct platform_device *pdev)
  822. {
  823. struct device_node *np = pdev->dev.of_node;
  824. struct spi_master *master;
  825. struct fsl_dspi *dspi;
  826. struct resource *res;
  827. const struct regmap_config *regmap_config;
  828. void __iomem *base;
  829. struct fsl_dspi_platform_data *pdata;
  830. int ret = 0, cs_num, bus_num;
  831. master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
  832. if (!master)
  833. return -ENOMEM;
  834. dspi = spi_master_get_devdata(master);
  835. dspi->pdev = pdev;
  836. dspi->master = master;
  837. master->transfer = NULL;
  838. master->setup = dspi_setup;
  839. master->transfer_one_message = dspi_transfer_one_message;
  840. master->dev.of_node = pdev->dev.of_node;
  841. master->cleanup = dspi_cleanup;
  842. master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
  843. pdata = dev_get_platdata(&pdev->dev);
  844. if (pdata) {
  845. master->num_chipselect = pdata->cs_num;
  846. master->bus_num = pdata->bus_num;
  847. dspi->devtype_data = &coldfire_data;
  848. } else {
  849. ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
  850. if (ret < 0) {
  851. dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
  852. goto out_master_put;
  853. }
  854. master->num_chipselect = cs_num;
  855. ret = of_property_read_u32(np, "bus-num", &bus_num);
  856. if (ret < 0) {
  857. dev_err(&pdev->dev, "can't get bus-num\n");
  858. goto out_master_put;
  859. }
  860. master->bus_num = bus_num;
  861. dspi->devtype_data = of_device_get_match_data(&pdev->dev);
  862. if (!dspi->devtype_data) {
  863. dev_err(&pdev->dev, "can't get devtype_data\n");
  864. ret = -EFAULT;
  865. goto out_master_put;
  866. }
  867. }
  868. if (dspi->devtype_data->xspi_mode)
  869. master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
  870. else
  871. master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
  872. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  873. base = devm_ioremap_resource(&pdev->dev, res);
  874. if (IS_ERR(base)) {
  875. ret = PTR_ERR(base);
  876. goto out_master_put;
  877. }
  878. if (dspi->devtype_data->xspi_mode)
  879. regmap_config = &dspi_xspi_regmap_config[0];
  880. else
  881. regmap_config = &dspi_regmap_config;
  882. dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
  883. if (IS_ERR(dspi->regmap)) {
  884. dev_err(&pdev->dev, "failed to init regmap: %ld\n",
  885. PTR_ERR(dspi->regmap));
  886. ret = PTR_ERR(dspi->regmap);
  887. goto out_master_put;
  888. }
  889. if (dspi->devtype_data->xspi_mode) {
  890. dspi->regmap_pushr = devm_regmap_init_mmio(
  891. &pdev->dev, base + SPI_PUSHR,
  892. &dspi_xspi_regmap_config[1]);
  893. if (IS_ERR(dspi->regmap_pushr)) {
  894. dev_err(&pdev->dev,
  895. "failed to init pushr regmap: %ld\n",
  896. PTR_ERR(dspi->regmap_pushr));
  897. ret = PTR_ERR(dspi->regmap_pushr);
  898. goto out_master_put;
  899. }
  900. }
  901. dspi->clk = devm_clk_get(&pdev->dev, "dspi");
  902. if (IS_ERR(dspi->clk)) {
  903. ret = PTR_ERR(dspi->clk);
  904. dev_err(&pdev->dev, "unable to get clock\n");
  905. goto out_master_put;
  906. }
  907. ret = clk_prepare_enable(dspi->clk);
  908. if (ret)
  909. goto out_master_put;
  910. dspi_init(dspi);
  911. dspi->irq = platform_get_irq(pdev, 0);
  912. if (dspi->irq < 0) {
  913. dev_err(&pdev->dev, "can't get platform irq\n");
  914. ret = dspi->irq;
  915. goto out_clk_put;
  916. }
  917. ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
  918. pdev->name, dspi);
  919. if (ret < 0) {
  920. dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
  921. goto out_clk_put;
  922. }
  923. if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
  924. ret = dspi_request_dma(dspi, res->start);
  925. if (ret < 0) {
  926. dev_err(&pdev->dev, "can't get dma channels\n");
  927. goto out_clk_put;
  928. }
  929. }
  930. master->max_speed_hz =
  931. clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
  932. init_waitqueue_head(&dspi->waitq);
  933. platform_set_drvdata(pdev, master);
  934. ret = spi_register_master(master);
  935. if (ret != 0) {
  936. dev_err(&pdev->dev, "Problem registering DSPI master\n");
  937. goto out_clk_put;
  938. }
  939. return ret;
  940. out_clk_put:
  941. clk_disable_unprepare(dspi->clk);
  942. out_master_put:
  943. spi_master_put(master);
  944. return ret;
  945. }
  946. static int dspi_remove(struct platform_device *pdev)
  947. {
  948. struct spi_master *master = platform_get_drvdata(pdev);
  949. struct fsl_dspi *dspi = spi_master_get_devdata(master);
  950. /* Disconnect from the SPI framework */
  951. dspi_release_dma(dspi);
  952. clk_disable_unprepare(dspi->clk);
  953. spi_unregister_master(dspi->master);
  954. return 0;
  955. }
  956. static struct platform_driver fsl_dspi_driver = {
  957. .driver.name = DRIVER_NAME,
  958. .driver.of_match_table = fsl_dspi_dt_ids,
  959. .driver.owner = THIS_MODULE,
  960. .driver.pm = &dspi_pm,
  961. .probe = dspi_probe,
  962. .remove = dspi_remove,
  963. };
/* Standard boilerplate: registers the driver at module init/exit. */
module_platform_driver(fsl_dspi_driver);

MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);