/*
 * drivers/spi/spi-fsl-dspi.c
 *
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * Freescale DSPI driver
 * This file contains a driver for the Freescale DSPI
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-fsl-dspi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/time.h>
#define DRIVER_NAME "fsl-dspi"

#define DSPI_FIFO_SIZE 4
#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)

#define SPI_MCR 0x00
#define SPI_MCR_MASTER (1 << 31)
#define SPI_MCR_PCSIS (0x3F << 16)
#define SPI_MCR_CLR_TXF (1 << 11)
#define SPI_MCR_CLR_RXF (1 << 10)
#define SPI_MCR_XSPI (1 << 3)

#define SPI_TCR 0x08
#define SPI_TCR_GET_TCNT(x) (((x) & 0xffff0000) >> 16)

#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4))
#define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
#define SPI_CTAR_CPOL(x) ((x) << 26)
#define SPI_CTAR_CPHA(x) ((x) << 25)
#define SPI_CTAR_LSBFE(x) ((x) << 24)
#define SPI_CTAR_PCSSCK(x) (((x) & 0x00000003) << 22)
#define SPI_CTAR_PASC(x) (((x) & 0x00000003) << 20)
#define SPI_CTAR_PDT(x) (((x) & 0x00000003) << 18)
#define SPI_CTAR_PBR(x) (((x) & 0x00000003) << 16)
#define SPI_CTAR_CSSCK(x) (((x) & 0x0000000f) << 12)
#define SPI_CTAR_ASC(x) (((x) & 0x0000000f) << 8)
#define SPI_CTAR_DT(x) (((x) & 0x0000000f) << 4)
#define SPI_CTAR_BR(x) ((x) & 0x0000000f)
#define SPI_CTAR_SCALE_BITS 0xf

#define SPI_CTAR0_SLAVE 0x0c

#define SPI_SR 0x2c
#define SPI_SR_EOQF 0x10000000
#define SPI_SR_TCFQF 0x80000000
#define SPI_SR_CLEAR 0xdaad0000

#define SPI_RSER_TFFFE BIT(25)
#define SPI_RSER_TFFFD BIT(24)
#define SPI_RSER_RFDFE BIT(17)
#define SPI_RSER_RFDFD BIT(16)

#define SPI_RSER 0x30
#define SPI_RSER_EOQFE 0x10000000
#define SPI_RSER_TCFQE 0x80000000

#define SPI_PUSHR 0x34
#define SPI_PUSHR_CMD_CONT (1 << 15)
#define SPI_PUSHR_CONT (SPI_PUSHR_CMD_CONT << 16)
#define SPI_PUSHR_CMD_CTAS(x) (((x) & 0x0003) << 12)
#define SPI_PUSHR_CTAS(x) (SPI_PUSHR_CMD_CTAS(x) << 16)
#define SPI_PUSHR_CMD_EOQ (1 << 11)
#define SPI_PUSHR_EOQ (SPI_PUSHR_CMD_EOQ << 16)
#define SPI_PUSHR_CMD_CTCNT (1 << 10)
#define SPI_PUSHR_CTCNT (SPI_PUSHR_CMD_CTCNT << 16)
#define SPI_PUSHR_CMD_PCS(x) ((1 << x) & 0x003f)
#define SPI_PUSHR_PCS(x) (SPI_PUSHR_CMD_PCS(x) << 16)
#define SPI_PUSHR_TXDATA(x) ((x) & 0x0000ffff)

#define SPI_PUSHR_SLAVE 0x34

#define SPI_POPR 0x38
#define SPI_POPR_RXDATA(x) ((x) & 0x0000ffff)

#define SPI_TXFR0 0x3c
#define SPI_TXFR1 0x40
#define SPI_TXFR2 0x44
#define SPI_TXFR3 0x48
#define SPI_RXFR0 0x7c
#define SPI_RXFR1 0x80
#define SPI_RXFR2 0x84
#define SPI_RXFR3 0x88

#define SPI_CTARE(x) (0x11c + (((x) & 0x3) * 4))
#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)

#define SPI_SREX 0x13c

#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_BITS_MASK SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_16 SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_8 SPI_CTAR_FMSZ(0x7)

#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)
#define SPI_FRAME_EBITS_MASK SPI_CTARE_FMSZE(1)

/* Register offsets for regmap_pushr */
#define PUSHR_CMD 0x0
#define PUSHR_TX 0x2

#define SPI_CS_INIT 0x01
#define SPI_CS_ASSERT 0x02
#define SPI_CS_DROP 0x04

#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
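
/*
 * Note on XSPI (extended SPI) support, added here for readability:
 * devtype_data->xspi_mode enables the SPI_MCR_XSPI bit and the extended
 * registers SPI_CTARE(x)/SPI_SREX above.  SPI_CTARE_FMSZE provides one
 * extra frame-size bit on top of the 4-bit SPI_CTAR_FMSZ field, which is
 * how dspi_probe() can advertise bits_per_word up to 32 instead of 16.
 * Exact register semantics should be confirmed against the DSPI chapter
 * of the relevant reference manual; this summary only restates what the
 * code below does with these bits.
 */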

struct chip_data {
	u32 ctar_val;
	u16 void_write_data;
};
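
/*
 * The controller is driven in one of three ways, selected per SoC via
 * fsl_dspi_devtype_data below:
 *
 *  DSPI_EOQ_MODE  - fill up to DSPI_FIFO_SIZE entries into the TX FIFO,
 *                   mark the last one with SPI_PUSHR_CMD_EOQ and take an
 *                   End-Of-Queue interrupt once the FIFO has drained.
 *  DSPI_TCFQ_MODE - push one frame at a time and take a Transfer-Complete
 *                   interrupt per frame (the XSPI >16-bit path lives in
 *                   dspi_tcfq_write()).
 *  DSPI_DMA_MODE  - let the DMA engine feed SPI_PUSHR and drain SPI_POPR
 *                   through the coherent buffers in struct fsl_dspi_dma.
 *
 * This summary mirrors dspi_transfer_one_message() and dspi_interrupt();
 * it is descriptive only and adds no new behaviour.
 */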

enum dspi_trans_mode {
	DSPI_EOQ_MODE = 0,
	DSPI_TCFQ_MODE,
	DSPI_DMA_MODE,
};

struct fsl_dspi_devtype_data {
	enum dspi_trans_mode trans_mode;
	u8 max_clock_factor;
	bool xspi_mode;
};

static const struct fsl_dspi_devtype_data vf610_data = {
	.trans_mode = DSPI_DMA_MODE,
	.max_clock_factor = 2,
};

static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
	.xspi_mode = true,
};

static const struct fsl_dspi_devtype_data ls2085a_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
};

static const struct fsl_dspi_devtype_data coldfire_data = {
	.trans_mode = DSPI_EOQ_MODE,
	.max_clock_factor = 8,
};

struct fsl_dspi_dma {
	/* Length of transfer in words of DSPI_FIFO_SIZE */
	u32 curr_xfer_len;

	u32 *tx_dma_buf;
	struct dma_chan *chan_tx;
	dma_addr_t tx_dma_phys;
	struct completion cmd_tx_complete;
	struct dma_async_tx_descriptor *tx_desc;

	u32 *rx_dma_buf;
	struct dma_chan *chan_rx;
	dma_addr_t rx_dma_phys;
	struct completion cmd_rx_complete;
	struct dma_async_tx_descriptor *rx_desc;
};

struct fsl_dspi {
	struct spi_master *master;
	struct platform_device *pdev;

	struct regmap *regmap;
	struct regmap *regmap_pushr;
	int irq;
	struct clk *clk;

	struct spi_transfer *cur_transfer;
	struct spi_message *cur_msg;
	struct chip_data *cur_chip;
	size_t len;
	const void *tx;
	void *rx;
	void *rx_end;
	u16 void_write_data;
	u16 tx_cmd;
	u8 bits_per_word;
	u8 bytes_per_word;
	const struct fsl_dspi_devtype_data *devtype_data;

	wait_queue_head_t waitq;
	u32 waitflags;

	struct fsl_dspi_dma *dma;
};

static u32 dspi_pop_tx(struct fsl_dspi *dspi)
{
	u32 txdata = 0;

	if (dspi->tx) {
		if (dspi->bytes_per_word == 1)
			txdata = *(u8 *)dspi->tx;
		else if (dspi->bytes_per_word == 2)
			txdata = *(u16 *)dspi->tx;
		else /* dspi->bytes_per_word == 4 */
			txdata = *(u32 *)dspi->tx;
		dspi->tx += dspi->bytes_per_word;
	}
	dspi->len -= dspi->bytes_per_word;
	return txdata;
}
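
/*
 * Each entry written to SPI_PUSHR is a 32-bit word: the upper 16 bits
 * carry the command half (CTAS, CONT, EOQ, CTCNT, PCS) and the lower
 * 16 bits carry TX data, which is why dspi_pop_tx_pushr() returns
 * "cmd << 16 | data".  As an illustrative (hypothetical) example, an
 * 8-bit transfer of 0xA5 on chip select 0 with CONT requested would be
 * pushed as ((SPI_PUSHR_CMD_CONT | SPI_PUSHR_CMD_PCS(0)) << 16) | 0xA5.
 * The XSPI path instead writes the two halves separately through the
 * 16-bit regmap_pushr (PUSHR_CMD / PUSHR_TX).
 */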

static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
	u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);

	if (dspi->len > 0)
		cmd |= SPI_PUSHR_CMD_CONT;
	return cmd << 16 | data;
}

static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
{
	if (!dspi->rx)
		return;

	/* Mask off undefined bits */
	rxdata &= (1 << dspi->bits_per_word) - 1;

	if (dspi->bytes_per_word == 1)
		*(u8 *)dspi->rx = rxdata;
	else if (dspi->bytes_per_word == 2)
		*(u16 *)dspi->rx = rxdata;
	else /* dspi->bytes_per_word == 4 */
		*(u32 *)dspi->rx = rxdata;
	dspi->rx += dspi->bytes_per_word;
}

static void dspi_tx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;

	complete(&dma->cmd_tx_complete);
}

static void dspi_rx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;
	int i;

	if (dspi->rx) {
		for (i = 0; i < dma->curr_xfer_len; i++)
			dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
	}

	complete(&dma->cmd_rx_complete);
}
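
/*
 * DMA submission path: dspi_next_xfer_dma_submit() pre-builds
 * curr_xfer_len ready-made PUSHR words in tx_dma_buf, prepares one
 * slave transfer towards SPI_PUSHR and one from SPI_POPR into
 * rx_dma_buf, then issues both and blocks on the two completions above
 * with a DMA_COMPLETION_TIMEOUT bound.  On timeout both channels are
 * terminated and -ETIMEDOUT is returned to the caller.
 */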

static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;
	int time_left;
	int i;

	for (i = 0; i < dma->curr_xfer_len; i++)
		dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);

	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
					dma->tx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->tx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->tx_desc->callback = dspi_tx_dma_callback;
	dma->tx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
					dma->rx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->rx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->rx_desc->callback = dspi_rx_dma_callback;
	dma->rx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	reinit_completion(&dspi->dma->cmd_rx_complete);
	reinit_completion(&dspi->dma->cmd_tx_complete);

	dma_async_issue_pending(dma->chan_rx);
	dma_async_issue_pending(dma->chan_tx);

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
					DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA tx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
					DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA rx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	return 0;
}
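
/*
 * dspi_dma_xfer() slices a transfer into chunks that fit the coherent
 * DMA buffers and calls dspi_next_xfer_dma_submit() per chunk.  Note
 * that curr_xfer_len is counted in PUSHR words (see struct fsl_dspi_dma)
 * while curr_remaining_bytes is in bytes, hence the divisions and
 * multiplications by bytes_per_word below.
 */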

static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;
	int curr_remaining_bytes;
	int bytes_per_buffer;
	int ret = 0;

	curr_remaining_bytes = dspi->len;
	bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
	while (curr_remaining_bytes) {
		/* Check if current transfer fits the DMA buffer */
		dma->curr_xfer_len = curr_remaining_bytes
			/ dspi->bytes_per_word;
		if (dma->curr_xfer_len > bytes_per_buffer)
			dma->curr_xfer_len = bytes_per_buffer;

		ret = dspi_next_xfer_dma_submit(dspi);
		if (ret) {
			dev_err(dev, "DMA transfer failed\n");
			goto exit;
		} else {
			curr_remaining_bytes -= dma->curr_xfer_len
				* dspi->bytes_per_word;
			if (curr_remaining_bytes < 0)
				curr_remaining_bytes = 0;
		}
	}

exit:
	return ret;
}

static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
	struct fsl_dspi_dma *dma;
	struct dma_slave_config cfg;
	struct device *dev = &dspi->pdev->dev;
	int ret;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->chan_rx = dma_request_slave_channel(dev, "rx");
	if (!dma->chan_rx) {
		dev_err(dev, "rx dma channel not available\n");
		ret = -ENODEV;
		return ret;
	}

	dma->chan_tx = dma_request_slave_channel(dev, "tx");
	if (!dma->chan_tx) {
		dev_err(dev, "tx dma channel not available\n");
		ret = -ENODEV;
		goto err_tx_channel;
	}

	dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
					&dma->tx_dma_phys, GFP_KERNEL);
	if (!dma->tx_dma_buf) {
		ret = -ENOMEM;
		goto err_tx_dma_buf;
	}

	dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
					&dma->rx_dma_phys, GFP_KERNEL);
	if (!dma->rx_dma_buf) {
		ret = -ENOMEM;
		goto err_rx_dma_buf;
	}

	cfg.src_addr = phy_addr + SPI_POPR;
	cfg.dst_addr = phy_addr + SPI_PUSHR;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	cfg.direction = DMA_DEV_TO_MEM;
	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure rx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	cfg.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure tx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	dspi->dma = dma;
	init_completion(&dma->cmd_tx_complete);
	init_completion(&dma->cmd_rx_complete);

	return 0;

err_slave_config:
	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
			dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
			dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
	dma_release_channel(dma->chan_tx);
err_tx_channel:
	dma_release_channel(dma->chan_rx);

	devm_kfree(dev, dma);
	dspi->dma = NULL;

	return ret;
}

static void dspi_release_dma(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;

	if (dma) {
		if (dma->chan_tx) {
			dma_unmap_single(dev, dma->tx_dma_phys,
					DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
			dma_release_channel(dma->chan_tx);
		}

		if (dma->chan_rx) {
			dma_unmap_single(dev, dma->rx_dma_phys,
					DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
			dma_release_channel(dma->chan_rx);
		}
	}
}
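
/*
 * The SCK rate is derived as clkrate / (PBR * BR), where PBR comes from
 * pbr_tbl[] and BR from brs[] below.  hz_to_spi_baud() picks the pair
 * with the smallest combined divider that still satisfies
 * PBR * BR >= clkrate / speed_hz (rounded up).  Worked example (values
 * assumed, not taken from any board file): clkrate = 66 MHz and
 * speed_hz = 10 MHz give scale_needed = 7; the smallest product >= 7 is
 * 2 * 4 = 8, so *pbr = 0 (divide by 2), *br = 1 (divide by 4), and the
 * resulting SCK is 66 MHz / 8 = 8.25 MHz.
 */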

static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
			   unsigned long clkrate)
{
	/* Valid baud rate pre-scaler values */
	int pbr_tbl[4] = {2, 3, 5, 7};
	int brs[16] = { 2, 4, 6, 8,
			16, 32, 64, 128,
			256, 512, 1024, 2048,
			4096, 8192, 16384, 32768 };
	int scale_needed, scale, minscale = INT_MAX;
	int i, j;

	scale_needed = clkrate / speed_hz;
	if (clkrate % speed_hz)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(brs); i++)
		for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
			scale = brs[i] * pbr_tbl[j];
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*br = i;
					*pbr = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Can not find valid baud rate, speed_hz is %d, clkrate is %ld, we use the max prescaler value.\n",
			speed_hz, clkrate);
		*pbr = ARRAY_SIZE(pbr_tbl) - 1;
		*br = ARRAY_SIZE(brs) - 1;
	}
}
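
/*
 * ns_delay_scale() does the analogous search for the CS-to-SCK and
 * after-SCK delay fields: the delay in protocol clock cycles is
 * approximated by pscale_tbl[*psc] * 2^(*sc + 1) (the "2 << j" term),
 * and the smallest combination meeting delay_ns * clkrate / NSEC_PER_SEC
 * cycles wins.  If no combination is large enough, the maximum scalers
 * are used and a warning is printed, mirroring hz_to_spi_baud() above.
 */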

static void ns_delay_scale(char *psc, char *sc, int delay_ns,
			   unsigned long clkrate)
{
	int pscale_tbl[4] = {1, 3, 5, 7};
	int scale_needed, scale, minscale = INT_MAX;
	int i, j;
	u32 remainder;

	scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
			&remainder);
	if (remainder)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
		for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
			scale = pscale_tbl[i] * (2 << j);
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*psc = i;
					*sc = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
			delay_ns, clkrate);
		*psc = ARRAY_SIZE(pscale_tbl) - 1;
		*sc = SPI_CTAR_SCALE_BITS;
	}
}

static void fifo_write(struct fsl_dspi *dspi)
{
	regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
}

static void cmd_fifo_write(struct fsl_dspi *dspi)
{
	u16 cmd = dspi->tx_cmd;

	if (dspi->len > 0)
		cmd |= SPI_PUSHR_CMD_CONT;
	regmap_write(dspi->regmap_pushr, PUSHR_CMD, cmd);
}

static void tx_fifo_write(struct fsl_dspi *dspi, u16 txdata)
{
	regmap_write(dspi->regmap_pushr, PUSHR_TX, txdata);
}

static void dspi_tcfq_write(struct fsl_dspi *dspi)
{
	/* Clear transfer count */
	dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;

	if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
		/* Write two TX FIFO entries first, and then the corresponding
		 * CMD FIFO entry.
		 */
		u32 data = dspi_pop_tx(dspi);

		if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE(1)) {
			/* LSB */
			tx_fifo_write(dspi, data & 0xFFFF);
			tx_fifo_write(dspi, data >> 16);
		} else {
			/* MSB */
			tx_fifo_write(dspi, data >> 16);
			tx_fifo_write(dspi, data & 0xFFFF);
		}
		cmd_fifo_write(dspi);
	} else {
		/* Write one entry to both TX FIFO and CMD FIFO
		 * simultaneously.
		 */
		fifo_write(dspi);
	}
}

static u32 fifo_read(struct fsl_dspi *dspi)
{
	u32 rxdata = 0;

	regmap_read(dspi->regmap, SPI_POPR, &rxdata);
	return rxdata;
}

static void dspi_tcfq_read(struct fsl_dspi *dspi)
{
	dspi_push_rx(dspi, fifo_read(dspi));
}
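
/*
 * EOQ mode helpers: dspi_eoq_write() loads up to DSPI_FIFO_SIZE combined
 * command/data entries, clearing the transfer counter on the first entry
 * and setting SPI_PUSHR_CMD_EOQ on the last one so the controller raises
 * EOQF when the queue drains; dspi_eoq_read() then pops the same number
 * of entries from the RX FIFO in the interrupt handler.
 */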

static void dspi_eoq_write(struct fsl_dspi *dspi)
{
	int fifo_size = DSPI_FIFO_SIZE;

	/* Fill TX FIFO with as many transfers as possible */
	while (dspi->len && fifo_size--) {
		/* Request EOQF for last transfer in FIFO */
		if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
			dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
		/* Clear transfer count for first transfer in FIFO */
		if (fifo_size == (DSPI_FIFO_SIZE - 1))
			dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
		/* Write combined TX FIFO and CMD FIFO entry */
		fifo_write(dspi);
	}
}

static void dspi_eoq_read(struct fsl_dspi *dspi)
{
	int fifo_size = DSPI_FIFO_SIZE;

	/* Read one FIFO entry at a time and push to rx buffer */
	while ((dspi->rx < dspi->rx_end) && fifo_size--)
		dspi_push_rx(dspi, fifo_read(dspi));
}
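
/*
 * Per-message flow: for every spi_transfer the command word, CTAR(0)
 * (and CTARE(0) in XSPI mode) are reprogrammed, the FIFOs are flushed,
 * and the transfer is started in the SoC's trans_mode.  For the
 * interrupt-driven modes the thread then sleeps on dspi->waitq until
 * dspi_interrupt() sets waitflags; the DMA mode completes synchronously
 * inside dspi_dma_xfer().
 */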

static int dspi_transfer_one_message(struct spi_master *master,
		struct spi_message *message)
{
	struct fsl_dspi *dspi = spi_master_get_devdata(master);
	struct spi_device *spi = message->spi;
	struct spi_transfer *transfer;
	int status = 0;
	enum dspi_trans_mode trans_mode;

	message->actual_length = 0;

	list_for_each_entry(transfer, &message->transfers, transfer_list) {
		dspi->cur_transfer = transfer;
		dspi->cur_msg = message;
		dspi->cur_chip = spi_get_ctldata(spi);
		/* Prepare command word for CMD FIFO */
		dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
			       SPI_PUSHR_CMD_PCS(spi->chip_select);
		if (list_is_last(&dspi->cur_transfer->transfer_list,
				 &dspi->cur_msg->transfers)) {
			/* Leave PCS activated after last transfer when
			 * cs_change is set.
			 */
			if (transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		} else {
			/* Keep PCS active between transfers in same message
			 * when cs_change is not set, and de-activate PCS
			 * between transfers in the same message when
			 * cs_change is set.
			 */
			if (!transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		}

		dspi->void_write_data = dspi->cur_chip->void_write_data;

		dspi->tx = transfer->tx_buf;
		dspi->rx = transfer->rx_buf;
		dspi->rx_end = dspi->rx + transfer->len;
		dspi->len = transfer->len;
		/* Validated transfer specific frame size (defaults applied) */
		dspi->bits_per_word = transfer->bits_per_word;
		if (transfer->bits_per_word <= 8)
			dspi->bytes_per_word = 1;
		else if (transfer->bits_per_word <= 16)
			dspi->bytes_per_word = 2;
		else
			dspi->bytes_per_word = 4;

		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
		regmap_write(dspi->regmap, SPI_CTAR(0),
			     dspi->cur_chip->ctar_val |
			     SPI_FRAME_BITS(transfer->bits_per_word));
		if (dspi->devtype_data->xspi_mode)
			regmap_write(dspi->regmap, SPI_CTARE(0),
				     SPI_FRAME_EBITS(transfer->bits_per_word)
				     | SPI_CTARE_DTCP(1));

		trans_mode = dspi->devtype_data->trans_mode;
		switch (trans_mode) {
		case DSPI_EOQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
			dspi_eoq_write(dspi);
			break;
		case DSPI_TCFQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
			dspi_tcfq_write(dspi);
			break;
		case DSPI_DMA_MODE:
			regmap_write(dspi->regmap, SPI_RSER,
				     SPI_RSER_TFFFE | SPI_RSER_TFFFD |
				     SPI_RSER_RFDFE | SPI_RSER_RFDFD);
			status = dspi_dma_xfer(dspi);
			break;
		default:
			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
				trans_mode);
			status = -EINVAL;
			goto out;
		}

		if (trans_mode != DSPI_DMA_MODE) {
			if (wait_event_interruptible(dspi->waitq,
						     dspi->waitflags))
				dev_err(&dspi->pdev->dev,
					"wait transfer complete fail!\n");
			dspi->waitflags = 0;
		}

		if (transfer->delay_usecs)
			udelay(transfer->delay_usecs);
	}

out:
	message->status = status;
	spi_finalize_current_message(master);

	return status;
}

static int dspi_setup(struct spi_device *spi)
{
	struct chip_data *chip;
	struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
	struct fsl_dspi_platform_data *pdata;
	u32 cs_sck_delay = 0, sck_cs_delay = 0;
	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
	unsigned char pasc = 0, asc = 0;
	unsigned long clkrate;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	pdata = dev_get_platdata(&dspi->pdev->dev);

	if (!pdata) {
		of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
				&cs_sck_delay);

		of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
				&sck_cs_delay);
	} else {
		cs_sck_delay = pdata->cs_sck_delay;
		sck_cs_delay = pdata->sck_cs_delay;
	}

	chip->void_write_data = 0;

	clkrate = clk_get_rate(dspi->clk);
	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);

	/* Set PCS to SCK delay scale values */
	ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);

	/* Set After SCK delay scale values */
	ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);

	chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
			| SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
			| SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
			| SPI_CTAR_PCSSCK(pcssck)
			| SPI_CTAR_CSSCK(cssck)
			| SPI_CTAR_PASC(pasc)
			| SPI_CTAR_ASC(asc)
			| SPI_CTAR_PBR(pbr)
			| SPI_CTAR_BR(br);

	spi_set_ctldata(spi, chip);

	return 0;
}

static void dspi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);

	dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
		spi->master->bus_num, spi->chip_select);

	kfree(chip);
}
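
/*
 * Interrupt handler: the status register is read and written back to
 * clear the asserted flags (write-1-to-clear semantics are assumed here,
 * matching SPI_SR_CLEAR in dspi_init()).  On EOQF/TCFQF the transfer
 * counter is folded into msg->actual_length, received words are drained,
 * and either the next batch is written or the waiting
 * dspi_transfer_one_message() thread is woken once dspi->len hits zero.
 */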

static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
	struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
	struct spi_message *msg = dspi->cur_msg;
	enum dspi_trans_mode trans_mode;
	u32 spi_sr, spi_tcr;
	u16 spi_tcnt;

	regmap_read(dspi->regmap, SPI_SR, &spi_sr);
	regmap_write(dspi->regmap, SPI_SR, spi_sr);

	if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) {
		/* Get transfer counter (in number of SPI transfers). It was
		 * reset to 0 when transfer(s) were started.
		 */
		regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
		spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
		/* Update total number of bytes that were transferred */
		msg->actual_length += spi_tcnt * dspi->bytes_per_word;

		trans_mode = dspi->devtype_data->trans_mode;
		switch (trans_mode) {
		case DSPI_EOQ_MODE:
			dspi_eoq_read(dspi);
			break;
		case DSPI_TCFQ_MODE:
			dspi_tcfq_read(dspi);
			break;
		default:
			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
				trans_mode);
			return IRQ_HANDLED;
		}

		if (!dspi->len) {
			dspi->waitflags = 1;
			wake_up_interruptible(&dspi->waitq);
		} else {
			switch (trans_mode) {
			case DSPI_EOQ_MODE:
				dspi_eoq_write(dspi);
				break;
			case DSPI_TCFQ_MODE:
				dspi_tcfq_write(dspi);
				break;
			default:
				dev_err(&dspi->pdev->dev,
					"unsupported trans_mode %u\n",
					trans_mode);
			}
		}
	}

	return IRQ_HANDLED;
}

static const struct of_device_id fsl_dspi_dt_ids[] = {
	{ .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
	{ .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
	{ .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
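
/*
 * Illustrative device tree node for this driver (addresses, interrupt
 * numbers and phandles below are placeholders, not taken from any real
 * board; consult the binding document for the authoritative format):
 *
 *	dspi0: spi@4002c000 {
 *		compatible = "fsl,vf610-dspi";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		reg = <0x4002c000 0x1000>;
 *		interrupts = <0 67 0x04>;
 *		clocks = <&clks 0>;
 *		clock-names = "dspi";
 *		spi-num-chipselects = <5>;
 *		bus-num = <0>;
 *		dmas = <&edma 0 12>, <&edma 0 13>;
 *		dma-names = "rx", "tx";
 *
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <10000000>;
 *			fsl,spi-cs-sck-delay = <100>;
 *			fsl,spi-sck-cs-delay = <50>;
 *		};
 *	};
 *
 * "spi-num-chipselects", "bus-num", the "dspi" clock, the "rx"/"tx" DMA
 * channels and the two per-slave delay properties are the ones actually
 * parsed by dspi_probe(), dspi_request_dma() and dspi_setup() in this
 * file.
 */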

#ifdef CONFIG_PM_SLEEP
static int dspi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_dspi *dspi = spi_master_get_devdata(master);

	spi_master_suspend(master);
	clk_disable_unprepare(dspi->clk);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int dspi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_dspi *dspi = spi_master_get_devdata(master);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		return ret;
	spi_master_resume(master);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);

static const struct regmap_range dspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
};

static const struct regmap_access_table dspi_volatile_table = {
	.yes_ranges = dspi_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
};

static const struct regmap_config dspi_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x88,
	.volatile_table = &dspi_volatile_table,
};

static const struct regmap_range dspi_xspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_xspi_volatile_table = {
	.yes_ranges = dspi_xspi_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
};

static const struct regmap_config dspi_xspi_regmap_config[] = {
	{
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.max_register = 0x13c,
		.volatile_table = &dspi_xspi_volatile_table,
	},
	{
		.name = "pushr",
		.reg_bits = 16,
		.val_bits = 16,
		.reg_stride = 2,
		.max_register = 0x2,
	},
};
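
/*
 * Two regmaps are used in XSPI mode: the main 32-bit one covering the
 * whole register block up to SPI_SREX, and a secondary 16-bit "pushr"
 * regmap (registered in dspi_probe() at base + SPI_PUSHR) so that the
 * command and data halves of PUSHR can be written as separate 16-bit
 * accesses by cmd_fifo_write()/tx_fifo_write().
 */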

static void dspi_init(struct fsl_dspi *dspi)
{
	regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS |
		     (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0));
	regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
	if (dspi->devtype_data->xspi_mode)
		regmap_write(dspi->regmap, SPI_CTARE(0),
			     SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1));
}

static int dspi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct fsl_dspi *dspi;
	struct resource *res;
	const struct regmap_config *regmap_config;
	void __iomem *base;
	struct fsl_dspi_platform_data *pdata;
	int ret = 0, cs_num, bus_num;

	master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
	if (!master)
		return -ENOMEM;

	dspi = spi_master_get_devdata(master);
	dspi->pdev = pdev;
	dspi->master = master;

	master->transfer = NULL;
	master->setup = dspi_setup;
	master->transfer_one_message = dspi_transfer_one_message;
	master->dev.of_node = pdev->dev.of_node;

	master->cleanup = dspi_cleanup;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		master->num_chipselect = pdata->cs_num;
		master->bus_num = pdata->bus_num;

		dspi->devtype_data = &coldfire_data;
	} else {
		ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
			goto out_master_put;
		}
		master->num_chipselect = cs_num;

		ret = of_property_read_u32(np, "bus-num", &bus_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get bus-num\n");
			goto out_master_put;
		}
		master->bus_num = bus_num;

		dspi->devtype_data = of_device_get_match_data(&pdev->dev);
		if (!dspi->devtype_data) {
			dev_err(&pdev->dev, "can't get devtype_data\n");
			ret = -EFAULT;
			goto out_master_put;
		}
	}

	if (dspi->devtype_data->xspi_mode)
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out_master_put;
	}

	if (dspi->devtype_data->xspi_mode)
		regmap_config = &dspi_xspi_regmap_config[0];
	else
		regmap_config = &dspi_regmap_config;
	dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
	if (IS_ERR(dspi->regmap)) {
		dev_err(&pdev->dev, "failed to init regmap: %ld\n",
			PTR_ERR(dspi->regmap));
		ret = PTR_ERR(dspi->regmap);
		goto out_master_put;
	}

	if (dspi->devtype_data->xspi_mode) {
		dspi->regmap_pushr = devm_regmap_init_mmio(
			&pdev->dev, base + SPI_PUSHR,
			&dspi_xspi_regmap_config[1]);
		if (IS_ERR(dspi->regmap_pushr)) {
			dev_err(&pdev->dev,
				"failed to init pushr regmap: %ld\n",
				PTR_ERR(dspi->regmap_pushr));
			ret = PTR_ERR(dspi->regmap_pushr);
			goto out_master_put;
		}
	}

	dspi->clk = devm_clk_get(&pdev->dev, "dspi");
	if (IS_ERR(dspi->clk)) {
		ret = PTR_ERR(dspi->clk);
		dev_err(&pdev->dev, "unable to get clock\n");
		goto out_master_put;
	}
	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		goto out_master_put;

	dspi_init(dspi);

	dspi->irq = platform_get_irq(pdev, 0);
	if (dspi->irq < 0) {
		dev_err(&pdev->dev, "can't get platform irq\n");
		ret = dspi->irq;
		goto out_clk_put;
	}

	ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
			pdev->name, dspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
		goto out_clk_put;
	}

	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		ret = dspi_request_dma(dspi, res->start);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get dma channels\n");
			goto out_clk_put;
		}
	}

	master->max_speed_hz =
		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;

	init_waitqueue_head(&dspi->waitq);
	platform_set_drvdata(pdev, master);

	ret = spi_register_master(master);
	if (ret != 0) {
		dev_err(&pdev->dev, "Problem registering DSPI master\n");
		goto out_clk_put;
	}

	return ret;

out_clk_put:
	clk_disable_unprepare(dspi->clk);
out_master_put:
	spi_master_put(master);

	return ret;
}

static int dspi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct fsl_dspi *dspi = spi_master_get_devdata(master);

	/* Disconnect from the SPI framework */
	dspi_release_dma(dspi);
	clk_disable_unprepare(dspi->clk);
	spi_unregister_master(dspi->master);

	return 0;
}

static struct platform_driver fsl_dspi_driver = {
	.driver.name = DRIVER_NAME,
	.driver.of_match_table = fsl_dspi_dt_ids,
	.driver.owner = THIS_MODULE,
	.driver.pm = &dspi_pm,
	.probe = dspi_probe,
	.remove = dspi_remove,
};
module_platform_driver(fsl_dspi_driver);

MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);