/*
 * spi-sirf.c - SPI bus driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>

#define DRIVER_NAME "sirfsoc_spi"

#define SIRFSOC_SPI_CTRL 0x0000
#define SIRFSOC_SPI_CMD 0x0004
#define SIRFSOC_SPI_TX_RX_EN 0x0008
#define SIRFSOC_SPI_INT_EN 0x000C
#define SIRFSOC_SPI_INT_STATUS 0x0010
#define SIRFSOC_SPI_TX_DMA_IO_CTRL 0x0100
#define SIRFSOC_SPI_TX_DMA_IO_LEN 0x0104
#define SIRFSOC_SPI_TXFIFO_CTRL 0x0108
#define SIRFSOC_SPI_TXFIFO_LEVEL_CHK 0x010C
#define SIRFSOC_SPI_TXFIFO_OP 0x0110
#define SIRFSOC_SPI_TXFIFO_STATUS 0x0114
#define SIRFSOC_SPI_TXFIFO_DATA 0x0118
#define SIRFSOC_SPI_RX_DMA_IO_CTRL 0x0120
#define SIRFSOC_SPI_RX_DMA_IO_LEN 0x0124
#define SIRFSOC_SPI_RXFIFO_CTRL 0x0128
#define SIRFSOC_SPI_RXFIFO_LEVEL_CHK 0x012C
#define SIRFSOC_SPI_RXFIFO_OP 0x0130
#define SIRFSOC_SPI_RXFIFO_STATUS 0x0134
#define SIRFSOC_SPI_RXFIFO_DATA 0x0138
#define SIRFSOC_SPI_DUMMY_DELAY_CTL 0x0144
/* SPI CTRL register defines */
#define SIRFSOC_SPI_SLV_MODE BIT(16)
#define SIRFSOC_SPI_CMD_MODE BIT(17)
#define SIRFSOC_SPI_CS_IO_OUT BIT(18)
#define SIRFSOC_SPI_CS_IO_MODE BIT(19)
#define SIRFSOC_SPI_CLK_IDLE_STAT BIT(20)
#define SIRFSOC_SPI_CS_IDLE_STAT BIT(21)
#define SIRFSOC_SPI_TRAN_MSB BIT(22)
#define SIRFSOC_SPI_DRV_POS_EDGE BIT(23)
#define SIRFSOC_SPI_CS_HOLD_TIME BIT(24)
#define SIRFSOC_SPI_CLK_SAMPLE_MODE BIT(25)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8 (0 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12 (1 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16 (2 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32 (3 << 26)
#define SIRFSOC_SPI_CMD_BYTE_NUM(x) ((x & 3) << 28)
#define SIRFSOC_SPI_ENA_AUTO_CLR BIT(30)
#define SIRFSOC_SPI_MUL_DAT_MODE BIT(31)

/* Interrupt Enable */
#define SIRFSOC_SPI_RX_DONE_INT_EN BIT(0)
#define SIRFSOC_SPI_TX_DONE_INT_EN BIT(1)
#define SIRFSOC_SPI_RX_OFLOW_INT_EN BIT(2)
#define SIRFSOC_SPI_TX_UFLOW_INT_EN BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA_INT_EN BIT(4)
#define SIRFSOC_SPI_TX_IO_DMA_INT_EN BIT(5)
#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_INT_EN BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9)
#define SIRFSOC_SPI_FRM_END_INT_EN BIT(10)
#define SIRFSOC_SPI_INT_MASK_ALL 0x1FFF

/* Interrupt status */
#define SIRFSOC_SPI_RX_DONE BIT(0)
#define SIRFSOC_SPI_TX_DONE BIT(1)
#define SIRFSOC_SPI_RX_OFLOW BIT(2)
#define SIRFSOC_SPI_TX_UFLOW BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA BIT(4)
#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_REACH BIT(9)
#define SIRFSOC_SPI_FRM_END BIT(10)

/* TX RX enable */
#define SIRFSOC_SPI_RX_EN BIT(0)
#define SIRFSOC_SPI_TX_EN BIT(1)
#define SIRFSOC_SPI_CMD_TX_EN BIT(2)

#define SIRFSOC_SPI_IO_MODE_SEL BIT(0)
#define SIRFSOC_SPI_RX_DMA_FLUSH BIT(2)

/* FIFO OPs */
#define SIRFSOC_SPI_FIFO_RESET BIT(0)
#define SIRFSOC_SPI_FIFO_START BIT(1)

/* FIFO CTRL */
#define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0)

/* FIFO Status */
#define SIRFSOC_SPI_FIFO_LEVEL_MASK 0xFF
#define SIRFSOC_SPI_FIFO_FULL BIT(8)
#define SIRFSOC_SPI_FIFO_EMPTY BIT(9)

/* 256 bytes rx/tx FIFO */
#define SIRFSOC_SPI_FIFO_SIZE 256
#define SIRFSOC_SPI_DAT_FRM_LEN_MAX (64 * 1024)

#define SIRFSOC_SPI_FIFO_SC(x) ((x) & 0x3F)
#define SIRFSOC_SPI_FIFO_LC(x) (((x) & 0x3F) << 10)
#define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20)
#define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2)
/*
 * Use DMA only if the rx/tx buffers and the transfer length are all 4-byte
 * aligned; this is a limitation of the DMA controller.
 */
#define ALIGNED(x) (!((u32)x & 0x3))
#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
        ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))

#define SIRFSOC_MAX_CMD_BYTES 4
struct sirfsoc_spi {
        struct spi_bitbang bitbang;
        struct completion rx_done;
        struct completion tx_done;

        void __iomem *base;
        u32 ctrl_freq;  /* SPI controller clock speed */
        struct clk *clk;

        /* rx & tx bufs from the spi_transfer */
        const void *tx;
        void *rx;

        /* place received word into rx buffer */
        void (*rx_word) (struct sirfsoc_spi *);
        /* get word from tx buffer for sending */
        void (*tx_word) (struct sirfsoc_spi *);

        /* number of words left to be transmitted/received */
        unsigned int left_tx_word;
        unsigned int left_rx_word;

        /* rx & tx DMA channels */
        struct dma_chan *rx_chan;
        struct dma_chan *tx_chan;
        dma_addr_t src_start;
        dma_addr_t dst_start;
        void *dummypage;
        int word_width; /* in bytes */

        /*
         * if the tx length is no more than 4 bytes and there is no rx
         * buffer, use command mode
         */
        bool tx_by_cmd;

        int chipselect[0];
};

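/*
 * Per-word FIFO accessors. spi_sirfsoc_setup_transfer() picks the u8/u16/u32
 * variant below according to bits_per_word, and spi_sirfsoc_pio_transfer()
 * then calls them through sspi->rx_word/tx_word.
 */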
static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
{
        u32 data;
        u8 *rx = sspi->rx;

        data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
        if (rx) {
                *rx++ = (u8) data;
                sspi->rx = rx;
        }
        sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
{
        u32 data = 0;
        const u8 *tx = sspi->tx;

        if (tx) {
                data = *tx++;
                sspi->tx = tx;
        }
        writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
        sspi->left_tx_word--;
}

static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
{
        u32 data;
        u16 *rx = sspi->rx;

        data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
        if (rx) {
                *rx++ = (u16) data;
                sspi->rx = rx;
        }
        sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
{
        u32 data = 0;
        const u16 *tx = sspi->tx;

        if (tx) {
                data = *tx++;
                sspi->tx = tx;
        }
        writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
        sspi->left_tx_word--;
}

static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
{
        u32 data;
        u32 *rx = sspi->rx;

        data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
        if (rx) {
                *rx++ = (u32) data;
                sspi->rx = rx;
        }
        sspi->left_rx_word--;
}

static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
{
        u32 data = 0;
        const u32 *tx = sspi->tx;

        if (tx) {
                data = *tx++;
                sspi->tx = tx;
        }
        writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
        sspi->left_tx_word--;
}

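/*
 * Interrupt handler: completes tx_done when the command-mode frame ends or
 * the TX FIFO drains, completes both rx_done and tx_done on RX overflow or
 * TX underflow, and otherwise polls for RX_IO_DMA before completing rx_done
 * for the PIO path. In every case all interrupts are masked and the status
 * bits cleared before returning.
 */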
static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
{
        struct sirfsoc_spi *sspi = dev_id;
        u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);

        if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) {
                complete(&sspi->tx_done);
                writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
                writel(SIRFSOC_SPI_INT_MASK_ALL,
                                sspi->base + SIRFSOC_SPI_INT_STATUS);
                return IRQ_HANDLED;
        }

        /* Error Conditions */
        if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
                        spi_stat & SIRFSOC_SPI_TX_UFLOW) {
                complete(&sspi->tx_done);
                complete(&sspi->rx_done);
                writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
                writel(SIRFSOC_SPI_INT_MASK_ALL,
                                sspi->base + SIRFSOC_SPI_INT_STATUS);
                return IRQ_HANDLED;
        }
        if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
                complete(&sspi->tx_done);
        while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) &
                        SIRFSOC_SPI_RX_IO_DMA))
                cpu_relax();
        complete(&sspi->rx_done);
        writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
        writel(SIRFSOC_SPI_INT_MASK_ALL,
                        sspi->base + SIRFSOC_SPI_INT_STATUS);

        return IRQ_HANDLED;
}

static void spi_sirfsoc_dma_fini_callback(void *data)
{
        struct completion *dma_complete = data;

        complete(dma_complete);
}

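/*
 * Command-mode transfer: up to SIRFSOC_MAX_CMD_BYTES of TX-only data are
 * packed into the CMD register and pushed out as a single frame; the FRM_END
 * interrupt completes tx_done. Returns the number of bytes sent, or 0 on
 * timeout.
 */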
static int spi_sirfsoc_cmd_transfer(struct spi_device *spi,
        struct spi_transfer *t)
{
        struct sirfsoc_spi *sspi;
        int timeout = t->len * 10;
        u32 cmd;

        sspi = spi_master_get_devdata(spi->master);
        memcpy(&cmd, sspi->tx, t->len);
        if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
                cmd = cpu_to_be32(cmd) >>
                        ((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
        if (sspi->word_width == 2 && t->len == 4 &&
                        (!(spi->mode & SPI_LSB_FIRST)))
                cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
        writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
        writel(SIRFSOC_SPI_FRM_END_INT_EN,
                sspi->base + SIRFSOC_SPI_INT_EN);
        writel(SIRFSOC_SPI_CMD_TX_EN,
                sspi->base + SIRFSOC_SPI_TX_RX_EN);

        if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
                dev_err(&spi->dev, "cmd transfer timeout\n");
                return 0;
        }

        return t->len;
}

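/*
 * DMA transfer path, used only when IS_DMA_VALID(t) holds (buffers and
 * length 4-byte aligned, length below 2 * PAGE_SIZE): the rx/tx buffers are
 * mapped with dma_map_single(), one slave descriptor is prepared per
 * direction, and both completions are waited for with a timeout.
 */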
static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
        struct spi_transfer *t)
{
        struct sirfsoc_spi *sspi;
        struct dma_async_tx_descriptor *rx_desc, *tx_desc;
        int timeout = t->len * 10;

        sspi = spi_master_get_devdata(spi->master);
        writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
        writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
        writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
        writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
        writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
        writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
        if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) {
                writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
                        SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE,
                        sspi->base + SIRFSOC_SPI_CTRL);
                writel(sspi->left_tx_word - 1,
                                sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
                writel(sspi->left_tx_word - 1,
                                sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
        } else {
                writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
                        sspi->base + SIRFSOC_SPI_CTRL);
                writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
                writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
        }

        sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
                                        (t->tx_buf != t->rx_buf) ?
                                        DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
        rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
                sspi->dst_start, t->len, DMA_DEV_TO_MEM,
                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        rx_desc->callback = spi_sirfsoc_dma_fini_callback;
        rx_desc->callback_param = &sspi->rx_done;

        sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
                                        (t->tx_buf != t->rx_buf) ?
                                        DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
        tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
                sspi->src_start, t->len, DMA_MEM_TO_DEV,
                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        tx_desc->callback = spi_sirfsoc_dma_fini_callback;
        tx_desc->callback_param = &sspi->tx_done;

        dmaengine_submit(tx_desc);
        dmaengine_submit(rx_desc);
        dma_async_issue_pending(sspi->tx_chan);
        dma_async_issue_pending(sspi->rx_chan);
        writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
                        sspi->base + SIRFSOC_SPI_TX_RX_EN);
        if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
                dev_err(&spi->dev, "transfer timeout\n");
                dmaengine_terminate_all(sspi->rx_chan);
        } else
                sspi->left_rx_word = 0;
        /*
         * we only wait for the tx-done event when transferring by DMA; in
         * PIO mode rx data is produced by writing tx data, so once rx is
         * done, tx has finished even earlier
         */
        if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
                dev_err(&spi->dev, "transfer timeout\n");
                dmaengine_terminate_all(sspi->tx_chan);
        }
        dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
        dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
        /* TX, RX FIFO stop */
        writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
        writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
        if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX)
                writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
}

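/*
 * PIO transfer path: each loop iteration fills the TX FIFO (at most
 * 256 / word_width words), enables the FIFO-empty/underflow/overflow
 * interrupts, waits for both completions, then drains the RX FIFO, and
 * repeats until no words are left in either direction.
 */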
static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
                struct spi_transfer *t)
{
        struct sirfsoc_spi *sspi;
        int timeout = t->len * 10;

        sspi = spi_master_get_devdata(spi->master);
        do {
                writel(SIRFSOC_SPI_FIFO_RESET,
                        sspi->base + SIRFSOC_SPI_RXFIFO_OP);
                writel(SIRFSOC_SPI_FIFO_RESET,
                        sspi->base + SIRFSOC_SPI_TXFIFO_OP);
                writel(SIRFSOC_SPI_FIFO_START,
                        sspi->base + SIRFSOC_SPI_RXFIFO_OP);
                writel(SIRFSOC_SPI_FIFO_START,
                        sspi->base + SIRFSOC_SPI_TXFIFO_OP);
                writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
                writel(SIRFSOC_SPI_INT_MASK_ALL,
                        sspi->base + SIRFSOC_SPI_INT_STATUS);
                writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
                        SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR,
                        sspi->base + SIRFSOC_SPI_CTRL);
                writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width))
                                - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
                writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width))
                                - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
                while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
                        & SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word)
                        sspi->tx_word(sspi);
                writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
                        SIRFSOC_SPI_TX_UFLOW_INT_EN |
                        SIRFSOC_SPI_RX_OFLOW_INT_EN,
                        sspi->base + SIRFSOC_SPI_INT_EN);
                writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
                        sspi->base + SIRFSOC_SPI_TX_RX_EN);
                if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
                        !wait_for_completion_timeout(&sspi->rx_done, timeout)) {
                        dev_err(&spi->dev, "transfer timeout\n");
                        break;
                }
                while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
                        & SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word)
                        sspi->rx_word(sspi);
                writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
                writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
        } while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
}

static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
{
        struct sirfsoc_spi *sspi;

        sspi = spi_master_get_devdata(spi->master);

        sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
        sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
        sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
        reinit_completion(&sspi->rx_done);
        reinit_completion(&sspi->tx_done);
        /*
         * if the transfer goes through the command register (tx only, no
         * rx_buf), just fill the command data into the command register and
         * wait for its completion
         */
        if (sspi->tx_by_cmd)
                spi_sirfsoc_cmd_transfer(spi, t);
        else if (IS_DMA_VALID(t))
                spi_sirfsoc_dma_transfer(spi, t);
        else
                spi_sirfsoc_pio_transfer(spi, t);

        return t->len - sspi->left_rx_word * sspi->word_width;
}

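/*
 * Chipselect handling: a zero entry in sspi->chipselect[] means the
 * controller's own CS line is toggled through SIRFSOC_SPI_CS_IO_OUT;
 * a non-zero entry is treated as a GPIO number and driven directly.
 */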
static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
{
        struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);

        if (sspi->chipselect[spi->chip_select] == 0) {
                u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
                switch (value) {
                case BITBANG_CS_ACTIVE:
                        if (spi->mode & SPI_CS_HIGH)
                                regval |= SIRFSOC_SPI_CS_IO_OUT;
                        else
                                regval &= ~SIRFSOC_SPI_CS_IO_OUT;
                        break;
                case BITBANG_CS_INACTIVE:
                        if (spi->mode & SPI_CS_HIGH)
                                regval &= ~SIRFSOC_SPI_CS_IO_OUT;
                        else
                                regval |= SIRFSOC_SPI_CS_IO_OUT;
                        break;
                }
                writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
        } else {
                int gpio = sspi->chipselect[spi->chip_select];
                switch (value) {
                case BITBANG_CS_ACTIVE:
                        gpio_direction_output(gpio,
                                        spi->mode & SPI_CS_HIGH ? 1 : 0);
                        break;
                case BITBANG_CS_INACTIVE:
                        gpio_direction_output(gpio,
                                        spi->mode & SPI_CS_HIGH ? 0 : 1);
                        break;
                }
        }
}

static int
spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
        struct sirfsoc_spi *sspi;
        u8 bits_per_word = 0;
        int hz = 0;
        u32 regval;
        u32 txfifo_ctrl, rxfifo_ctrl;
        u32 fifo_size = SIRFSOC_SPI_FIFO_SIZE / 4;

        sspi = spi_master_get_devdata(spi->master);

        bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
        hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;

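        /*
         * The divider written below is ctrl_freq / (2 * hz) - 1, which
         * assumes the controller derives the SPI bit clock as
         * ctrl_freq / (2 * (divider + 1)).
         */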
        regval = (sspi->ctrl_freq / (2 * hz)) - 1;
        if (regval > 0xFFFF || regval < 0) {
                dev_err(&spi->dev, "Speed %d not supported\n", hz);
                return -EINVAL;
        }

        switch (bits_per_word) {
        case 8:
                regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
                sspi->rx_word = spi_sirfsoc_rx_word_u8;
                sspi->tx_word = spi_sirfsoc_tx_word_u8;
                break;
        case 12:
        case 16:
                regval |= (bits_per_word == 12) ?
                        SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
                        SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
                sspi->rx_word = spi_sirfsoc_rx_word_u16;
                sspi->tx_word = spi_sirfsoc_tx_word_u16;
                break;
        case 32:
                regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
                sspi->rx_word = spi_sirfsoc_rx_word_u32;
                sspi->tx_word = spi_sirfsoc_tx_word_u32;
                break;
        default:
                BUG();
        }

        sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
        txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
                        sspi->word_width;
        rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
                        sspi->word_width;

        if (!(spi->mode & SPI_CS_HIGH))
                regval |= SIRFSOC_SPI_CS_IDLE_STAT;
        if (!(spi->mode & SPI_LSB_FIRST))
                regval |= SIRFSOC_SPI_TRAN_MSB;
        if (spi->mode & SPI_CPOL)
                regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
        /*
         * Data should be driven at least 1/2 cycle before the fetch edge
         * to make sure that data is stable at the fetch edge.
         */
        if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
            (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
                regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
        else
                regval |= SIRFSOC_SPI_DRV_POS_EDGE;

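        /*
         * FIFO level-check watermarks; SC/LC/HC presumably encode the
         * start/low/high check levels. The TX and RX settings below mirror
         * each other around fifo_size / 2 (fifo_size being the FIFO depth
         * in 32-bit words).
         */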
        writel(SIRFSOC_SPI_FIFO_SC(fifo_size - 2) |
                        SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
                        SIRFSOC_SPI_FIFO_HC(2),
                sspi->base + SIRFSOC_SPI_TXFIFO_LEVEL_CHK);
        writel(SIRFSOC_SPI_FIFO_SC(2) |
                        SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
                        SIRFSOC_SPI_FIFO_HC(fifo_size - 2),
                sspi->base + SIRFSOC_SPI_RXFIFO_LEVEL_CHK);
        writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
        writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);

        if (t && t->tx_buf && !t->rx_buf && (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
                regval |= (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
                                SIRFSOC_SPI_CMD_MODE);
                sspi->tx_by_cmd = true;
        } else {
                regval &= ~SIRFSOC_SPI_CMD_MODE;
                sspi->tx_by_cmd = false;
        }
        /*
         * set the spi controller in RISC chipselect mode: CS is controlled
         * by software via BITBANG_CS_ACTIVE and BITBANG_CS_INACTIVE
         */
        regval |= SIRFSOC_SPI_CS_IO_MODE;
        writel(regval, sspi->base + SIRFSOC_SPI_CTRL);

        if (IS_DMA_VALID(t)) {
                /* Enable DMA mode for RX, TX */
                writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
                writel(SIRFSOC_SPI_RX_DMA_FLUSH,
                        sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
        } else {
                /* Enable IO mode for RX, TX */
                writel(SIRFSOC_SPI_IO_MODE_SEL,
                        sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
                writel(SIRFSOC_SPI_IO_MODE_SEL,
                        sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
        }

        return 0;
}

static int spi_sirfsoc_setup(struct spi_device *spi)
{
        if (!spi->max_speed_hz)
                return -EINVAL;

        return spi_sirfsoc_setup_transfer(spi, NULL);
}

static int spi_sirfsoc_probe(struct platform_device *pdev)
{
        struct sirfsoc_spi *sspi;
        struct spi_master *master;
        struct resource *mem_res;
        int num_cs, cs_gpio, irq;
        int i;
        int ret;

        ret = of_property_read_u32(pdev->dev.of_node,
                        "sirf,spi-num-chipselects", &num_cs);
        if (ret < 0) {
                dev_err(&pdev->dev, "Unable to get chip select number\n");
                goto err_cs;
        }

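        /*
         * Allocate the master and driver data together; the extra
         * sizeof(int) * num_cs bytes back the chipselect[] array declared
         * at the end of struct sirfsoc_spi.
         */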
        master = spi_alloc_master(&pdev->dev,
                        sizeof(*sspi) + sizeof(int) * num_cs);
        if (!master) {
                dev_err(&pdev->dev, "Unable to allocate SPI master\n");
                return -ENOMEM;
        }
        platform_set_drvdata(pdev, master);
        sspi = spi_master_get_devdata(master);

        master->num_chipselect = num_cs;

        for (i = 0; i < master->num_chipselect; i++) {
                cs_gpio = of_get_named_gpio(pdev->dev.of_node, "cs-gpios", i);
                if (cs_gpio < 0) {
                        dev_err(&pdev->dev, "can't get cs gpio from DT\n");
                        ret = -ENODEV;
                        goto free_master;
                }

                sspi->chipselect[i] = cs_gpio;
                if (cs_gpio == 0)
                        continue; /* use cs from spi controller */

                ret = gpio_request(cs_gpio, DRIVER_NAME);
                if (ret) {
                        while (i > 0) {
                                i--;
                                if (sspi->chipselect[i] > 0)
                                        gpio_free(sspi->chipselect[i]);
                        }
                        dev_err(&pdev->dev, "fail to request cs gpios\n");
                        goto free_master;
                }
        }

        mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
        if (IS_ERR(sspi->base)) {
                ret = PTR_ERR(sspi->base);
                goto free_master;
        }
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = -ENXIO;
                goto free_master;
        }
        ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
                                DRIVER_NAME, sspi);
        if (ret)
                goto free_master;

        sspi->bitbang.master = master;
        sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
        sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
        sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
        sspi->bitbang.master->setup = spi_sirfsoc_setup;
        master->bus_num = pdev->id;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
        master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
                                        SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
        sspi->bitbang.master->dev.of_node = pdev->dev.of_node;

        /* request DMA channels */
        sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
        if (!sspi->rx_chan) {
                dev_err(&pdev->dev, "can not allocate rx dma channel\n");
                ret = -ENODEV;
                goto free_master;
        }
        sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
        if (!sspi->tx_chan) {
                dev_err(&pdev->dev, "can not allocate tx dma channel\n");
                ret = -ENODEV;
                goto free_rx_dma;
        }

        sspi->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(sspi->clk)) {
                ret = PTR_ERR(sspi->clk);
                goto free_tx_dma;
        }
        clk_prepare_enable(sspi->clk);
        sspi->ctrl_freq = clk_get_rate(sspi->clk);

        init_completion(&sspi->rx_done);
        init_completion(&sspi->tx_done);

        writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
        writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
        writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
        writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
        /* We are not using dummy delay between command and data */
        writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);

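        /*
         * dummypage stands in for a missing rx or tx buffer in
         * spi_sirfsoc_transfer(); its 2 * PAGE_SIZE size matches the length
         * bound enforced by IS_DMA_VALID().
         */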
        sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
        if (!sspi->dummypage) {
                ret = -ENOMEM;
                goto free_clk;
        }

        ret = spi_bitbang_start(&sspi->bitbang);
        if (ret)
                goto free_dummypage;

        dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num);

        return 0;
free_dummypage:
        kfree(sspi->dummypage);
free_clk:
        clk_disable_unprepare(sspi->clk);
        clk_put(sspi->clk);
free_tx_dma:
        dma_release_channel(sspi->tx_chan);
free_rx_dma:
        dma_release_channel(sspi->rx_chan);
free_master:
        spi_master_put(master);
err_cs:
        return ret;
}

static int spi_sirfsoc_remove(struct platform_device *pdev)
{
        struct spi_master *master;
        struct sirfsoc_spi *sspi;
        int i;

        master = platform_get_drvdata(pdev);
        sspi = spi_master_get_devdata(master);
        spi_bitbang_stop(&sspi->bitbang);
        for (i = 0; i < master->num_chipselect; i++) {
                if (sspi->chipselect[i] > 0)
                        gpio_free(sspi->chipselect[i]);
        }
        kfree(sspi->dummypage);
        clk_disable_unprepare(sspi->clk);
        clk_put(sspi->clk);
        dma_release_channel(sspi->rx_chan);
        dma_release_channel(sspi->tx_chan);
        spi_master_put(master);
        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int spi_sirfsoc_suspend(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
        int ret;

        ret = spi_master_suspend(master);
        if (ret)
                return ret;
        clk_disable(sspi->clk);
        return 0;
}

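/*
 * On resume, the FIFOs are reset and restarted before the core is resumed;
 * the assumption here is that the SPI block may have lost its FIFO state
 * while the clock was disabled during suspend.
 */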
static int spi_sirfsoc_resume(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct sirfsoc_spi *sspi = spi_master_get_devdata(master);

        clk_enable(sspi->clk);
        writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
        writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
        writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
        writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);

        return spi_master_resume(master);
}
#endif

static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
                         spi_sirfsoc_resume);

static const struct of_device_id spi_sirfsoc_of_match[] = {
        { .compatible = "sirf,prima2-spi", },
        { .compatible = "sirf,marco-spi", },
        {}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);

static struct platform_driver spi_sirfsoc_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .owner = THIS_MODULE,
                .pm = &spi_sirfsoc_pm_ops,
                .of_match_table = spi_sirfsoc_of_match,
        },
        .probe = spi_sirfsoc_probe,
        .remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);

MODULE_DESCRIPTION("SiRF SoC SPI master driver");
MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
MODULE_LICENSE("GPL v2");