  1. /*
  2. * SPI bus driver for CSR SiRFprimaII
  3. *
  4. * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
  5. *
  6. * Licensed under GPLv2 or later.
  7. */
  8. #include <linux/module.h>
  9. #include <linux/kernel.h>
  10. #include <linux/slab.h>
  11. #include <linux/clk.h>
  12. #include <linux/completion.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/io.h>
  15. #include <linux/of.h>
  16. #include <linux/bitops.h>
  17. #include <linux/err.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/of_gpio.h>
  20. #include <linux/spi/spi.h>
  21. #include <linux/spi/spi_bitbang.h>
  22. #include <linux/dmaengine.h>
  23. #include <linux/dma-direction.h>
  24. #include <linux/dma-mapping.h>
#define DRIVER_NAME "sirfsoc_spi"
/* Register offsets from the controller base address */
#define SIRFSOC_SPI_CTRL 0x0000
#define SIRFSOC_SPI_CMD 0x0004
#define SIRFSOC_SPI_TX_RX_EN 0x0008
#define SIRFSOC_SPI_INT_EN 0x000C
#define SIRFSOC_SPI_INT_STATUS 0x0010
#define SIRFSOC_SPI_TX_DMA_IO_CTRL 0x0100
#define SIRFSOC_SPI_TX_DMA_IO_LEN 0x0104
#define SIRFSOC_SPI_TXFIFO_CTRL 0x0108
#define SIRFSOC_SPI_TXFIFO_LEVEL_CHK 0x010C
#define SIRFSOC_SPI_TXFIFO_OP 0x0110
#define SIRFSOC_SPI_TXFIFO_STATUS 0x0114
#define SIRFSOC_SPI_TXFIFO_DATA 0x0118
#define SIRFSOC_SPI_RX_DMA_IO_CTRL 0x0120
#define SIRFSOC_SPI_RX_DMA_IO_LEN 0x0124
#define SIRFSOC_SPI_RXFIFO_CTRL 0x0128
#define SIRFSOC_SPI_RXFIFO_LEVEL_CHK 0x012C
#define SIRFSOC_SPI_RXFIFO_OP 0x0130
#define SIRFSOC_SPI_RXFIFO_STATUS 0x0134
#define SIRFSOC_SPI_RXFIFO_DATA 0x0138
#define SIRFSOC_SPI_DUMMY_DELAY_CTL 0x0144
/* SPI CTRL register defines */
#define SIRFSOC_SPI_SLV_MODE BIT(16)
#define SIRFSOC_SPI_CMD_MODE BIT(17)
#define SIRFSOC_SPI_CS_IO_OUT BIT(18)
#define SIRFSOC_SPI_CS_IO_MODE BIT(19)
#define SIRFSOC_SPI_CLK_IDLE_STAT BIT(20)
#define SIRFSOC_SPI_CS_IDLE_STAT BIT(21)
#define SIRFSOC_SPI_TRAN_MSB BIT(22)
#define SIRFSOC_SPI_DRV_POS_EDGE BIT(23)
#define SIRFSOC_SPI_CS_HOLD_TIME BIT(24)
#define SIRFSOC_SPI_CLK_SAMPLE_MODE BIT(25)
/* data word format, 2-bit field at [27:26] */
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8 (0 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12 (1 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16 (2 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32 (3 << 26)
/* number of command bytes minus one, 2-bit field at [29:28] */
#define SIRFSOC_SPI_CMD_BYTE_NUM(x) ((x & 3) << 28)
#define SIRFSOC_SPI_ENA_AUTO_CLR BIT(30)
#define SIRFSOC_SPI_MUL_DAT_MODE BIT(31)
/* Interrupt Enable */
#define SIRFSOC_SPI_RX_DONE_INT_EN BIT(0)
#define SIRFSOC_SPI_TX_DONE_INT_EN BIT(1)
#define SIRFSOC_SPI_RX_OFLOW_INT_EN BIT(2)
#define SIRFSOC_SPI_TX_UFLOW_INT_EN BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA_INT_EN BIT(4)
#define SIRFSOC_SPI_TX_IO_DMA_INT_EN BIT(5)
#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_INT_EN BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9)
#define SIRFSOC_SPI_FRM_END_INT_EN BIT(10)
#define SIRFSOC_SPI_INT_MASK_ALL 0x1FFF
/* Interrupt status (write-1-to-clear) */
#define SIRFSOC_SPI_RX_DONE BIT(0)
#define SIRFSOC_SPI_TX_DONE BIT(1)
#define SIRFSOC_SPI_RX_OFLOW BIT(2)
#define SIRFSOC_SPI_TX_UFLOW BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA BIT(4)
#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_REACH BIT(9)
#define SIRFSOC_SPI_FRM_END BIT(10)
/* TX RX enable */
#define SIRFSOC_SPI_RX_EN BIT(0)
#define SIRFSOC_SPI_TX_EN BIT(1)
#define SIRFSOC_SPI_CMD_TX_EN BIT(2)
/* TX/RX DMA_IO_CTRL bits */
#define SIRFSOC_SPI_IO_MODE_SEL BIT(0)
#define SIRFSOC_SPI_RX_DMA_FLUSH BIT(2)
/* FIFO OPs */
#define SIRFSOC_SPI_FIFO_RESET BIT(0)
#define SIRFSOC_SPI_FIFO_START BIT(1)
/* FIFO CTRL */
#define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0)
/* FIFO Status */
#define SIRFSOC_SPI_FIFO_LEVEL_MASK 0xFF
#define SIRFSOC_SPI_FIFO_FULL BIT(8)
#define SIRFSOC_SPI_FIFO_EMPTY BIT(9)
/* 256 bytes rx/tx FIFO */
#define SIRFSOC_SPI_FIFO_SIZE 256
#define SIRFSOC_SPI_DAT_FRM_LEN_MAX (64 * 1024)
/* FIFO level-check fields: start / low / high marks and threshold */
#define SIRFSOC_SPI_FIFO_SC(x) ((x) & 0x3F)
#define SIRFSOC_SPI_FIFO_LC(x) (((x) & 0x3F) << 10)
#define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20)
#define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2)
  112. /*
  113. * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma
  114. * due to the limitation of dma controller
  115. */
  116. #define ALIGNED(x) (!((u32)x & 0x3))
  117. #define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
  118. ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))
  119. #define SIRFSOC_MAX_CMD_BYTES 4
/* Per-controller driver state, stored as spi_master devdata. */
struct sirfsoc_spi {
	struct spi_bitbang bitbang;	/* must be first: bitbang core uses it */
	struct completion rx_done;	/* signalled when RX finishes (IRQ/DMA) */
	struct completion tx_done;	/* signalled when TX finishes (IRQ/DMA) */
	void __iomem *base;		/* mapped controller registers */
	u32 ctrl_freq; /* SPI controller clock speed */
	struct clk *clk;
	/* rx & tx bufs from the spi_transfer */
	const void *tx;
	void *rx;
	/* place received word into rx buffer */
	void (*rx_word) (struct sirfsoc_spi *);
	/* get word from tx buffer for sending */
	void (*tx_word) (struct sirfsoc_spi *);
	/* number of words left to be transmitted/received */
	unsigned int left_tx_word;
	unsigned int left_rx_word;
	/* rx & tx DMA channels */
	struct dma_chan *rx_chan;
	struct dma_chan *tx_chan;
	dma_addr_t src_start;	/* DMA mapping of the TX buffer */
	dma_addr_t dst_start;	/* DMA mapping of the RX buffer */
	void *dummypage;	/* scratch buffer when tx_buf/rx_buf is NULL */
	int word_width; /* in bytes */
	/*
	 * if tx size is not more than 4 and rx size is NULL, use
	 * command model
	 */
	bool tx_by_cmd;
	bool hw_cs;	/* true: controller drives CS; false: GPIO CS */
};
  151. static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
  152. {
  153. u32 data;
  154. u8 *rx = sspi->rx;
  155. data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
  156. if (rx) {
  157. *rx++ = (u8) data;
  158. sspi->rx = rx;
  159. }
  160. sspi->left_rx_word--;
  161. }
  162. static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
  163. {
  164. u32 data = 0;
  165. const u8 *tx = sspi->tx;
  166. if (tx) {
  167. data = *tx++;
  168. sspi->tx = tx;
  169. }
  170. writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
  171. sspi->left_tx_word--;
  172. }
  173. static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
  174. {
  175. u32 data;
  176. u16 *rx = sspi->rx;
  177. data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
  178. if (rx) {
  179. *rx++ = (u16) data;
  180. sspi->rx = rx;
  181. }
  182. sspi->left_rx_word--;
  183. }
  184. static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
  185. {
  186. u32 data = 0;
  187. const u16 *tx = sspi->tx;
  188. if (tx) {
  189. data = *tx++;
  190. sspi->tx = tx;
  191. }
  192. writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
  193. sspi->left_tx_word--;
  194. }
  195. static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
  196. {
  197. u32 data;
  198. u32 *rx = sspi->rx;
  199. data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
  200. if (rx) {
  201. *rx++ = (u32) data;
  202. sspi->rx = rx;
  203. }
  204. sspi->left_rx_word--;
  205. }
  206. static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
  207. {
  208. u32 data = 0;
  209. const u32 *tx = sspi->tx;
  210. if (tx) {
  211. data = *tx++;
  212. sspi->tx = tx;
  213. }
  214. writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
  215. sspi->left_tx_word--;
  216. }
/*
 * Interrupt handler.  Three cases:
 *  - command-mode TX: FRM_END means the command word went out;
 *  - RX overflow / TX underflow: abort by waking both waiters;
 *  - PIO path: TXFIFO_EMPTY wakes the TX waiter, then we spin until
 *    RX_IO_DMA asserts and wake the RX waiter.
 * Every exit masks all interrupts and write-1-clears the status bits.
 */
static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
{
	struct sirfsoc_spi *sspi = dev_id;
	u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);

	/* command-mode transfer finished */
	if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) {
		complete(&sspi->tx_done);
		writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
			sspi->base + SIRFSOC_SPI_INT_STATUS);
		return IRQ_HANDLED;
	}
	/* Error Conditions */
	if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
			spi_stat & SIRFSOC_SPI_TX_UFLOW) {
		/* wake both waiters so the transfer path can bail out */
		complete(&sspi->tx_done);
		complete(&sspi->rx_done);
		writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
			sspi->base + SIRFSOC_SPI_INT_STATUS);
		return IRQ_HANDLED;
	}
	if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
		complete(&sspi->tx_done);
	/*
	 * NOTE(review): busy-wait in hard-IRQ context; assumes RX_IO_DMA
	 * asserts shortly after the TX FIFO drains -- confirm against the
	 * controller documentation.
	 */
	while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) &
			SIRFSOC_SPI_RX_IO_DMA))
		cpu_relax();
	complete(&sspi->rx_done);
	writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_INT_MASK_ALL,
		sspi->base + SIRFSOC_SPI_INT_STATUS);
	return IRQ_HANDLED;
}
  249. static void spi_sirfsoc_dma_fini_callback(void *data)
  250. {
  251. struct completion *dma_complete = data;
  252. complete(dma_complete);
  253. }
/*
 * Send a short TX-only transfer (<= SIRFSOC_MAX_CMD_BYTES, no rx_buf)
 * through the command register instead of the TX FIFO.  Selected when
 * sspi->tx_by_cmd was set in spi_sirfsoc_setup_transfer().
 */
static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	/* NOTE(review): passed straight to wait_for_completion_timeout(),
	 * i.e. interpreted as jiffies -- confirm the scale is intended. */
	int timeout = t->len * 10;
	u32 cmd;

	sspi = spi_master_get_devdata(spi->master);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	/* t->len <= 4, so the command fits in one u32 */
	memcpy(&cmd, sspi->tx, t->len);
	/* MSB-first byte stream: left-justify the command word */
	if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
		cmd = cpu_to_be32(cmd) >>
			((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
	/* MSB-first 16-bit words: swap the two half-words */
	if (sspi->word_width == 2 && t->len == 4 &&
			(!(spi->mode & SPI_LSB_FIRST)))
		cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
	writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
	/* frame-end interrupt signals completion (see spi_sirfsoc_irq) */
	writel(SIRFSOC_SPI_FRM_END_INT_EN,
		sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_CMD_TX_EN,
		sspi->base + SIRFSOC_SPI_TX_RX_EN);
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "cmd transfer timeout\n");
		return;
	}
	/* account the bytes so the caller's progress arithmetic works */
	sspi->left_rx_word -= t->len;
}
/*
 * Full-duplex transfer using the RX and TX DMA channels.  Only entered
 * when IS_DMA_VALID(t) holds: buffers and length 4-byte aligned and
 * len < 2 * PAGE_SIZE.
 */
static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	struct dma_async_tx_descriptor *rx_desc, *tx_desc;
	int timeout = t->len * 10;	/* used as jiffies below */

	sspi = spi_master_get_devdata(spi->master);
	/* reset + restart both FIFOs, mask and clear stale interrupts */
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
	if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) {
		/* frame fits the HW counter: let hardware count words and
		 * auto-clear the enables when done */
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
			SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE,
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(sspi->left_tx_word - 1,
			sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(sspi->left_tx_word - 1,
			sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
	} else {
		/* oversized frame: no HW word counting */
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
	}
	/*
	 * NOTE(review): the dma_map_single() results are not checked with
	 * dma_mapping_error() and the prep_slave_single() returns are not
	 * NULL-checked -- worth hardening in a follow-up.
	 */
	sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
		(t->tx_buf != t->rx_buf) ?
		DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
	rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
		sspi->dst_start, t->len, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	rx_desc->callback = spi_sirfsoc_dma_fini_callback;
	rx_desc->callback_param = &sspi->rx_done;
	sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
		(t->tx_buf != t->rx_buf) ?
		DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
	tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
		sspi->src_start, t->len, DMA_MEM_TO_DEV,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	tx_desc->callback = spi_sirfsoc_dma_fini_callback;
	tx_desc->callback_param = &sspi->tx_done;
	dmaengine_submit(tx_desc);
	dmaengine_submit(rx_desc);
	dma_async_issue_pending(sspi->tx_chan);
	dma_async_issue_pending(sspi->rx_chan);
	/* kick off the transfer */
	writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
		sspi->base + SIRFSOC_SPI_TX_RX_EN);
	if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->rx_chan);
	} else
		sspi->left_rx_word = 0;
	/*
	 * we only wait tx-done event if transferring by DMA. for PIO,
	 * we get rx data by writing tx data, so if rx is done, tx has
	 * done earlier
	 */
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->tx_chan);
	}
	dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
	dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
	/* TX, RX FIFO stop */
	writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	/* HW auto-clears the enables in the counted-frame case above */
	if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX)
		writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
}
/*
 * PIO fallback path: the CPU feeds the TX FIFO, the interrupt handler
 * signals completion, then the CPU drains the RX FIFO.  Runs in
 * FIFO-sized chunks until the whole transfer is consumed.
 */
static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	int timeout = t->len * 10;	/* used as jiffies below */

	sspi = spi_master_get_devdata(spi->master);
	do {
		/* reset + restart both FIFOs, clear stale interrupt state */
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + SIRFSOC_SPI_TXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + SIRFSOC_SPI_TXFIFO_OP);
		writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
			sspi->base + SIRFSOC_SPI_INT_STATUS);
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
			SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR,
			sspi->base + SIRFSOC_SPI_CTRL);
		/* chunk size: at most one 256-byte FIFO worth of words */
		writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width))
			- 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width))
			- 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
		/* pre-fill the TX FIFO before enabling the transfer */
		while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
				& SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word)
			sspi->tx_word(sspi);
		writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
			SIRFSOC_SPI_TX_UFLOW_INT_EN |
			SIRFSOC_SPI_RX_OFLOW_INT_EN |
			SIRFSOC_SPI_RX_IO_DMA_INT_EN,
			sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + SIRFSOC_SPI_TX_RX_EN);
		if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
			!wait_for_completion_timeout(&sspi->rx_done, timeout)) {
			dev_err(&spi->dev, "transfer timeout\n");
			break;
		}
		/* drain whatever the chunk produced */
		while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
				& SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word)
			sspi->rx_word(sspi);
		writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	} while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
}
  399. static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
  400. {
  401. struct sirfsoc_spi *sspi;
  402. sspi = spi_master_get_devdata(spi->master);
  403. sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
  404. sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
  405. sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
  406. reinit_completion(&sspi->rx_done);
  407. reinit_completion(&sspi->tx_done);
  408. /*
  409. * in the transfer, if transfer data using command register with rx_buf
  410. * null, just fill command data into command register and wait for its
  411. * completion.
  412. */
  413. if (sspi->tx_by_cmd)
  414. spi_sirfsoc_cmd_transfer(spi, t);
  415. else if (IS_DMA_VALID(t))
  416. spi_sirfsoc_dma_transfer(spi, t);
  417. else
  418. spi_sirfsoc_pio_transfer(spi, t);
  419. return t->len - sspi->left_rx_word * sspi->word_width;
  420. }
  421. static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
  422. {
  423. struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);
  424. if (sspi->hw_cs) {
  425. u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
  426. switch (value) {
  427. case BITBANG_CS_ACTIVE:
  428. if (spi->mode & SPI_CS_HIGH)
  429. regval |= SIRFSOC_SPI_CS_IO_OUT;
  430. else
  431. regval &= ~SIRFSOC_SPI_CS_IO_OUT;
  432. break;
  433. case BITBANG_CS_INACTIVE:
  434. if (spi->mode & SPI_CS_HIGH)
  435. regval &= ~SIRFSOC_SPI_CS_IO_OUT;
  436. else
  437. regval |= SIRFSOC_SPI_CS_IO_OUT;
  438. break;
  439. }
  440. writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
  441. } else {
  442. switch (value) {
  443. case BITBANG_CS_ACTIVE:
  444. gpio_direction_output(spi->cs_gpio,
  445. spi->mode & SPI_CS_HIGH ? 1 : 0);
  446. break;
  447. case BITBANG_CS_INACTIVE:
  448. gpio_direction_output(spi->cs_gpio,
  449. spi->mode & SPI_CS_HIGH ? 0 : 1);
  450. break;
  451. }
  452. }
  453. }
  454. static int
  455. spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
  456. {
  457. struct sirfsoc_spi *sspi;
  458. u8 bits_per_word = 0;
  459. int hz = 0;
  460. u32 regval;
  461. u32 txfifo_ctrl, rxfifo_ctrl;
  462. u32 fifo_size = SIRFSOC_SPI_FIFO_SIZE / 4;
  463. sspi = spi_master_get_devdata(spi->master);
  464. bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
  465. hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
  466. regval = (sspi->ctrl_freq / (2 * hz)) - 1;
  467. if (regval > 0xFFFF || regval < 0) {
  468. dev_err(&spi->dev, "Speed %d not supported\n", hz);
  469. return -EINVAL;
  470. }
  471. switch (bits_per_word) {
  472. case 8:
  473. regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
  474. sspi->rx_word = spi_sirfsoc_rx_word_u8;
  475. sspi->tx_word = spi_sirfsoc_tx_word_u8;
  476. break;
  477. case 12:
  478. case 16:
  479. regval |= (bits_per_word == 12) ?
  480. SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
  481. SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
  482. sspi->rx_word = spi_sirfsoc_rx_word_u16;
  483. sspi->tx_word = spi_sirfsoc_tx_word_u16;
  484. break;
  485. case 32:
  486. regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
  487. sspi->rx_word = spi_sirfsoc_rx_word_u32;
  488. sspi->tx_word = spi_sirfsoc_tx_word_u32;
  489. break;
  490. default:
  491. BUG();
  492. }
  493. sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
  494. txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
  495. sspi->word_width;
  496. rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
  497. sspi->word_width;
  498. if (!(spi->mode & SPI_CS_HIGH))
  499. regval |= SIRFSOC_SPI_CS_IDLE_STAT;
  500. if (!(spi->mode & SPI_LSB_FIRST))
  501. regval |= SIRFSOC_SPI_TRAN_MSB;
  502. if (spi->mode & SPI_CPOL)
  503. regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
  504. /*
  505. * Data should be driven at least 1/2 cycle before the fetch edge
  506. * to make sure that data gets stable at the fetch edge.
  507. */
  508. if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
  509. (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
  510. regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
  511. else
  512. regval |= SIRFSOC_SPI_DRV_POS_EDGE;
  513. writel(SIRFSOC_SPI_FIFO_SC(fifo_size - 2) |
  514. SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
  515. SIRFSOC_SPI_FIFO_HC(2),
  516. sspi->base + SIRFSOC_SPI_TXFIFO_LEVEL_CHK);
  517. writel(SIRFSOC_SPI_FIFO_SC(2) |
  518. SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
  519. SIRFSOC_SPI_FIFO_HC(fifo_size - 2),
  520. sspi->base + SIRFSOC_SPI_RXFIFO_LEVEL_CHK);
  521. writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
  522. writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);
  523. if (t && t->tx_buf && !t->rx_buf && (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
  524. regval |= (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
  525. SIRFSOC_SPI_CMD_MODE);
  526. sspi->tx_by_cmd = true;
  527. } else {
  528. regval &= ~SIRFSOC_SPI_CMD_MODE;
  529. sspi->tx_by_cmd = false;
  530. }
  531. /*
  532. * it should never set to hardware cs mode because in hardware cs mode,
  533. * cs signal can't controlled by driver.
  534. */
  535. regval |= SIRFSOC_SPI_CS_IO_MODE;
  536. writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
  537. if (IS_DMA_VALID(t)) {
  538. /* Enable DMA mode for RX, TX */
  539. writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
  540. writel(SIRFSOC_SPI_RX_DMA_FLUSH,
  541. sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
  542. } else {
  543. /* Enable IO mode for RX, TX */
  544. writel(SIRFSOC_SPI_IO_MODE_SEL,
  545. sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
  546. writel(SIRFSOC_SPI_IO_MODE_SEL,
  547. sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
  548. }
  549. return 0;
  550. }
  551. static int spi_sirfsoc_setup(struct spi_device *spi)
  552. {
  553. struct sirfsoc_spi *sspi;
  554. if (!spi->max_speed_hz)
  555. return -EINVAL;
  556. sspi = spi_master_get_devdata(spi->master);
  557. if (spi->cs_gpio == -ENOENT)
  558. sspi->hw_cs = true;
  559. else
  560. sspi->hw_cs = false;
  561. return spi_sirfsoc_setup_transfer(spi, NULL);
  562. }
/*
 * Probe: map registers, hook the IRQ, grab both DMA channels and the
 * clock, reset the FIFOs, allocate the scratch page and register the
 * bitbang master.  Error paths unwind through the fall-through labels.
 */
static int spi_sirfsoc_probe(struct platform_device *pdev)
{
	struct sirfsoc_spi *sspi;
	struct spi_master *master;
	struct resource *mem_res;
	int irq;
	int i, ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
	if (!master) {
		dev_err(&pdev->dev, "Unable to allocate SPI master\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, master);
	sspi = spi_master_get_devdata(master);
	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sspi->base)) {
		ret = PTR_ERR(sspi->base);
		goto free_master;
	}
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto free_master;
	}
	ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
		DRIVER_NAME, sspi);
	if (ret)
		goto free_master;
	/* wire up the bitbang framework callbacks */
	sspi->bitbang.master = master;
	sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
	sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
	sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
	sspi->bitbang.master->setup = spi_sirfsoc_setup;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
		SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
	sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
	/* request DMA channels */
	sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!sspi->rx_chan) {
		dev_err(&pdev->dev, "can not allocate rx dma channel\n");
		ret = -ENODEV;
		goto free_master;
	}
	sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
	if (!sspi->tx_chan) {
		dev_err(&pdev->dev, "can not allocate tx dma channel\n");
		ret = -ENODEV;
		goto free_rx_dma;
	}
	sspi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sspi->clk)) {
		ret = PTR_ERR(sspi->clk);
		goto free_tx_dma;
	}
	clk_prepare_enable(sspi->clk);
	sspi->ctrl_freq = clk_get_rate(sspi->clk);
	init_completion(&sspi->rx_done);
	init_completion(&sspi->tx_done);
	/* reset + restart both FIFOs to a known state */
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	/* We are not using dummy delay between command and data */
	writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);
	/* scratch buffer; 2 * PAGE_SIZE matches the IS_DMA_VALID() limit */
	sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
	if (!sspi->dummypage) {
		ret = -ENOMEM;
		goto free_clk;
	}
	ret = spi_bitbang_start(&sspi->bitbang);
	if (ret)
		goto free_dummypage;
	/*
	 * NOTE(review): failures below jump to free_dummypage without a
	 * spi_bitbang_stop(), so the master stays registered while its
	 * resources are freed -- confirm and fix separately.
	 */
	for (i = 0; master->cs_gpios && i < master->num_chipselect; i++) {
		if (master->cs_gpios[i] == -ENOENT)
			continue;	/* this CS uses hardware chip select */
		if (!gpio_is_valid(master->cs_gpios[i])) {
			dev_err(&pdev->dev, "no valid gpio\n");
			ret = -EINVAL;
			goto free_dummypage;
		}
		ret = devm_gpio_request(&pdev->dev,
			master->cs_gpios[i], DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev, "failed to request gpio\n");
			goto free_dummypage;
		}
	}
	dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num);
	return 0;
/* error labels deliberately fall through: each frees one more resource */
free_dummypage:
	kfree(sspi->dummypage);
free_clk:
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
free_tx_dma:
	dma_release_channel(sspi->tx_chan);
free_rx_dma:
	dma_release_channel(sspi->rx_chan);
free_master:
	spi_master_put(master);
	return ret;
}
  668. static int spi_sirfsoc_remove(struct platform_device *pdev)
  669. {
  670. struct spi_master *master;
  671. struct sirfsoc_spi *sspi;
  672. master = platform_get_drvdata(pdev);
  673. sspi = spi_master_get_devdata(master);
  674. spi_bitbang_stop(&sspi->bitbang);
  675. kfree(sspi->dummypage);
  676. clk_disable_unprepare(sspi->clk);
  677. clk_put(sspi->clk);
  678. dma_release_channel(sspi->rx_chan);
  679. dma_release_channel(sspi->tx_chan);
  680. spi_master_put(master);
  681. return 0;
  682. }
  683. #ifdef CONFIG_PM_SLEEP
  684. static int spi_sirfsoc_suspend(struct device *dev)
  685. {
  686. struct spi_master *master = dev_get_drvdata(dev);
  687. struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
  688. int ret;
  689. ret = spi_master_suspend(master);
  690. if (ret)
  691. return ret;
  692. clk_disable(sspi->clk);
  693. return 0;
  694. }
  695. static int spi_sirfsoc_resume(struct device *dev)
  696. {
  697. struct spi_master *master = dev_get_drvdata(dev);
  698. struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
  699. clk_enable(sspi->clk);
  700. writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
  701. writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
  702. writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
  703. writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
  704. return spi_master_resume(master);
  705. }
  706. #endif
static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
	spi_sirfsoc_resume);

/* devicetree match table: prima2 and marco SoCs share this controller */
static const struct of_device_id spi_sirfsoc_of_match[] = {
	{ .compatible = "sirf,prima2-spi", },
	{ .compatible = "sirf,marco-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);

static struct platform_driver spi_sirfsoc_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.pm = &spi_sirfsoc_pm_ops,
		.of_match_table = spi_sirfsoc_of_match,
	},
	.probe = spi_sirfsoc_probe,
	.remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);

MODULE_DESCRIPTION("SiRF SoC SPI master driver");
MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
MODULE_LICENSE("GPL v2");