spi-qup.c

/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License rev 2 and
 * only rev 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#define QUP_CONFIG 0x0000
#define QUP_STATE 0x0004
#define QUP_IO_M_MODES 0x0008
#define QUP_SW_RESET 0x000c
#define QUP_OPERATIONAL 0x0018
#define QUP_ERROR_FLAGS 0x001c
#define QUP_ERROR_FLAGS_EN 0x0020
#define QUP_OPERATIONAL_MASK 0x0028
#define QUP_HW_VERSION 0x0030
#define QUP_MX_OUTPUT_CNT 0x0100
#define QUP_OUTPUT_FIFO 0x0110
#define QUP_MX_WRITE_CNT 0x0150
#define QUP_MX_INPUT_CNT 0x0200
#define QUP_MX_READ_CNT 0x0208
#define QUP_INPUT_FIFO 0x0218

#define SPI_CONFIG 0x0300
#define SPI_IO_CONTROL 0x0304
#define SPI_ERROR_FLAGS 0x0308
#define SPI_ERROR_FLAGS_EN 0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE (1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
#define QUP_CONFIG_NO_INPUT BIT(7)
#define QUP_CONFIG_NO_OUTPUT BIT(6)
#define QUP_CONFIG_N 0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID BIT(2)
#define QUP_STATE_RESET 0
#define QUP_STATE_RUN 1
#define QUP_STATE_PAUSE 3
#define QUP_STATE_MASK 3
#define QUP_STATE_CLEAR 2

#define QUP_HW_VERSION_2_1_1 0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN BIT(15)
#define QUP_IO_M_UNPACK_EN BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)

#define QUP_IO_M_MODE_FIFO 0
#define QUP_IO_M_MODE_BLOCK 1
#define QUP_IO_M_MODE_DMOV 2
#define QUP_IO_M_MODE_BAM 3

/* QUP_OPERATIONAL fields */
#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
#define QUP_OP_IN_SERVICE_FLAG BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
#define QUP_OP_IN_FIFO_FULL BIT(7)
#define QUP_OP_OUT_FIFO_FULL BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE BIT(10)
#define SPI_CONFIG_INPUT_FIRST BIT(9)
#define SPI_CONFIG_LOOPBACK BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
#define SPI_IO_C_MX_CS_MODE BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK 0x000c
#define SPI_IO_C_TRISTATE_CS BIT(1)
#define SPI_IO_C_NO_TRI_STATE BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN BIT(0)

#define SPI_NUM_CHIPSELECTS 4

/* high speed mode is when bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE 26000000
#define SPI_MAX_RATE 50000000

#define SPI_DELAY_THRESHOLD 1
#define SPI_DELAY_RETRY 10

struct spi_qup_device {
	int select;
	u16 mode;
};

struct spi_qup {
	void __iomem *base;
	struct device *dev;
	struct clk *cclk; /* core clock */
	struct clk *iclk; /* interface clock */
	int irq;
	spinlock_t lock;

	int in_fifo_sz;
	int out_fifo_sz;
	int in_blk_sz;
	int out_blk_sz;

	struct spi_transfer *xfer;
	struct completion done;
	int error;
	int w_size; /* bytes per SPI word */
	int tx_bytes;
	int rx_bytes;
};

static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
	u32 opstate = readl_relaxed(controller->base + QUP_STATE);

	return opstate & QUP_STATE_VALID;
}
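
/*
 * Move the QUP state machine to @state. The hardware drops
 * QUP_STATE_VALID while a state change is in flight, so poll for the
 * bit both before writing the new state and after, giving up after
 * SPI_DELAY_RETRY attempts. Leaving PAUSE for RESET needs two writes
 * of the CLEAR code, as noted below.
 */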
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
	unsigned long loop;
	u32 cur_state;

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {
		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	if (loop)
		dev_dbg(controller->dev, "invalid state for %ld us, state %d\n",
			loop, state);

	cur_state = readl_relaxed(controller->base + QUP_STATE);
	/*
	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
	 * of (b10) are required
	 */
	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
	    (state == QUP_STATE_RESET)) {
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		writel_relaxed(cur_state, controller->base + QUP_STATE);
	}

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {
		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	return 0;
}
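
/*
 * Drain the input FIFO into xfer->rx_buf, one 32-bit FIFO word at a
 * time, until the FIFO runs empty or the transfer is complete. Each
 * word carries w_size payload bytes, most significant byte first (see
 * the format note in the loop). With a NULL rx_buf the data is
 * discarded and only the byte count advances.
 */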
static void spi_qup_fifo_read(struct spi_qup *controller,
			      struct spi_transfer *xfer)
{
	u8 *rx_buf = xfer->rx_buf;
	u32 word, state;
	int idx, shift, w_size;

	w_size = controller->w_size;

	while (controller->rx_bytes < xfer->len) {

		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
			break;

		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

		if (!rx_buf) {
			controller->rx_bytes += w_size;
			continue;
		}

		for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
			/*
			 * The data format depends on bytes per SPI word:
			 * 4 bytes: 0x12345678
			 * 2 bytes: 0x00001234
			 * 1 byte : 0x00000012
			 */
			shift = BITS_PER_BYTE;
			shift *= (w_size - idx - 1);
			rx_buf[controller->rx_bytes] = word >> shift;
		}
	}
}
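
/*
 * Fill the output FIFO from xfer->tx_buf until the FIFO reports full
 * or all bytes have been queued. Bytes are packed into each 32-bit
 * FIFO word starting at its most significant byte. With a NULL tx_buf
 * zero words are written and only the byte count advances.
 */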
static void spi_qup_fifo_write(struct spi_qup *controller,
			       struct spi_transfer *xfer)
{
	const u8 *tx_buf = xfer->tx_buf;
	u32 word, state, data;
	int idx, w_size;

	w_size = controller->w_size;

	while (controller->tx_bytes < xfer->len) {

		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (state & QUP_OP_OUT_FIFO_FULL)
			break;

		word = 0;
		for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {

			if (!tx_buf) {
				controller->tx_bytes += w_size;
				break;
			}

			data = tx_buf[controller->tx_bytes];
			word |= data << (BITS_PER_BYTE * (3 - idx));
		}

		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
	}
}
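
/*
 * Interrupt handler: read and acknowledge the QUP/SPI error and
 * operational flags, service the FIFOs for the transfer in flight and
 * complete it once all receive bytes have arrived or an error was
 * flagged. An interrupt with no transfer pending is only logged.
 */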
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	struct spi_transfer *xfer;
	u32 opflags, qup_err, spi_err;
	unsigned long flags;
	int error = 0;

	spin_lock_irqsave(&controller->lock, flags);
	xfer = controller->xfer;
	controller->xfer = NULL;
	spin_unlock_irqrestore(&controller->lock, flags);

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
	writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

	if (!xfer) {
		dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
				    qup_err, spi_err, opflags);
		return IRQ_HANDLED;
	}

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	if (opflags & QUP_OP_IN_SERVICE_FLAG)
		spi_qup_fifo_read(controller, xfer);

	if (opflags & QUP_OP_OUT_SERVICE_FLAG)
		spi_qup_fifo_write(controller, xfer);

	spin_lock_irqsave(&controller->lock, flags);
	controller->error = error;
	controller->xfer = xfer;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (controller->rx_bytes == xfer->len || error)
		complete(&controller->done);

	return IRQ_HANDLED;
}

/* set clock freq ... bits per word */
static int spi_qup_io_config(struct spi_qup *controller,
			     struct spi_qup_device *chip,
			     struct spi_transfer *xfer)
{
	u32 config, iomode, mode;
	int ret, n_words, w_size;

	if (chip->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "too big size for loopback %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "fail to set frequency %d",
			xfer->speed_hz);
		return -EIO;
	}

	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	w_size = 4;
	if (xfer->bits_per_word <= 8)
		w_size = 1;
	else if (xfer->bits_per_word <= 16)
		w_size = 2;

	n_words = xfer->len / w_size;
	controller->w_size = w_size;

	if (n_words <= controller->in_fifo_sz) {
		mode = QUP_IO_M_MODE_FIFO;
		writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
	} else {
		mode = QUP_IO_M_MODE_BLOCK;
		writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
	iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (chip->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	if (chip->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability for spi-clk high rates,
	 * but is invalid in loopback mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(chip->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;
	writel_relaxed(config, controller->base + QUP_CONFIG);

	writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
	return 0;
}
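
/*
 * Chip-select is driven manually: auto CS mode is disabled, FORCE_CS
 * is set, and the polarity bit of the addressed CS line is set or
 * cleared to assert or deassert it.
 */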
static void spi_qup_set_cs(struct spi_device *spi, bool enable)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	struct spi_qup_device *chip = spi_get_ctldata(spi);
	u32 iocontrol, mask;

	iocontrol = readl_relaxed(controller->base + SPI_IO_CONTROL);

	/* Disable auto CS toggle and use manual */
	iocontrol &= ~SPI_IO_C_MX_CS_MODE;
	iocontrol |= SPI_IO_C_FORCE_CS;

	iocontrol &= ~SPI_IO_C_CS_SELECT_MASK;
	iocontrol |= SPI_IO_C_CS_SELECT(chip->select);

	mask = SPI_IO_C_CS_N_POLARITY_0 << chip->select;

	if (enable)
		iocontrol |= mask;
	else
		iocontrol &= ~mask;

	writel_relaxed(iocontrol, controller->base + SPI_IO_CONTROL);
}
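
/*
 * Run a single transfer: configure the controller for it, compute a
 * timeout of roughly 100 times the expected transfer time, preload
 * the output FIFO while the controller is paused, then switch to RUN
 * and wait for the interrupt handler to signal completion.
 */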
static int spi_qup_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(master);
	struct spi_qup_device *chip = spi_get_ctldata(spi);
	unsigned long timeout, flags;
	int ret = -EIO;

	ret = spi_qup_io_config(controller, chip, xfer);
	if (ret)
		return ret;

	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
	timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
	timeout = 100 * msecs_to_jiffies(timeout);

	reinit_completion(&controller->done);

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = xfer;
	controller->error = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
		dev_warn(controller->dev, "cannot set RUN state\n");
		goto exit;
	}

	if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
		dev_warn(controller->dev, "cannot set PAUSE state\n");
		goto exit;
	}

	spi_qup_fifo_write(controller, xfer);

	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
		dev_warn(controller->dev, "cannot set EXECUTE state\n");
		goto exit;
	}

	if (!wait_for_completion_timeout(&controller->done, timeout))
		ret = -ETIMEDOUT;
exit:
	spi_qup_set_state(controller, QUP_STATE_RESET);
	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = NULL;
	if (!ret)
		ret = controller->error;
	spin_unlock_irqrestore(&controller->lock, flags);
	return ret;
}

static int spi_qup_setup(struct spi_device *spi)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	struct spi_qup_device *chip = spi_get_ctldata(spi);

	if (!chip) {
		/* First setup */
		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip) {
			dev_err(controller->dev, "no memory for chip data\n");
			return -ENOMEM;
		}

		chip->mode = spi->mode;
		chip->select = spi->chip_select;
		spi_set_ctldata(spi, chip);
	}

	return 0;
}

static void spi_qup_cleanup(struct spi_device *spi)
{
	struct spi_qup_device *chip = spi_get_ctldata(spi);

	if (!chip)
		return;

	spi_set_ctldata(spi, NULL);
	kfree(chip);
}
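
/*
 * Probe: map the registers, acquire the core and interface clocks,
 * check the QUP hardware revision, read back the FIFO geometry, reset
 * the block to a known state, then hook up the interrupt handler,
 * register the SPI master and enable runtime PM.
 */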
static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 data, max_freq, iomode;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* This is an optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	data = readl_relaxed(base + QUP_HW_VERSION);

	if (data < QUP_HW_VERSION_2_1_1) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "v.%08x is not supported\n", data);
		return -ENXIO;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->num_chipselect = SPI_NUM_CHIPSELECTS;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->setup = spi_qup_setup;
	master->cleanup = spi_qup_cleanup;
	master->set_cs = spi_qup_set_cs;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;

	platform_set_drvdata(pdev, master);

	controller = spi_master_get_devdata(master);
	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);
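
	/*
	 * FIFO geometry is advertised in QUP_IO_M_MODES: a block size
	 * field of N means N * 16 bytes (0 means 4 bytes), and each
	 * FIFO holds block size * 2^(FIFO size field + 1) bytes.
	 */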
	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "v.%08x IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 data, controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);
	writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error;

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto error;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;

error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);
	return ret;
}

#ifdef CONFIG_PM_RUNTIME
static int spi_qup_pm_suspend_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;

	/* Enable clock auto-gating */
	config = readl(controller->base + QUP_CONFIG);
	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;

	/* Disable clock auto-gating */
	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);
	return 0;
}

static int spi_qup_resume(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static int spi_qup_remove(struct platform_device *pdev)
{
	struct spi_master *master = dev_get_drvdata(&pdev->dev);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};

static struct platform_driver spi_qup_driver = {
	.driver = {
		.name = "spi_qup",
		.owner = THIS_MODULE,
		.pm = &spi_qup_dev_pm_ops,
		.of_match_table = spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.4");
MODULE_ALIAS("platform:spi_qup");