/* spi-qup.c — Qualcomm Universal Peripheral (QUP) SPI controller driver */
/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License rev 2 and
 * only rev 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
  13. #include <linux/clk.h>
  14. #include <linux/delay.h>
  15. #include <linux/err.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/io.h>
  18. #include <linux/list.h>
  19. #include <linux/module.h>
  20. #include <linux/of.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/pm_runtime.h>
  23. #include <linux/spi/spi.h>
/* QUP core register offsets */
#define QUP_CONFIG		0x0000
#define QUP_STATE		0x0004
#define QUP_IO_M_MODES		0x0008
#define QUP_SW_RESET		0x000c
#define QUP_OPERATIONAL		0x0018
#define QUP_ERROR_FLAGS		0x001c
#define QUP_ERROR_FLAGS_EN	0x0020
#define QUP_OPERATIONAL_MASK	0x0028	/* not present on QUP v1 */
#define QUP_HW_VERSION		0x0030
#define QUP_MX_OUTPUT_CNT	0x0100
#define QUP_OUTPUT_FIFO		0x0110
#define QUP_MX_WRITE_CNT	0x0150
#define QUP_MX_INPUT_CNT	0x0200
#define QUP_MX_READ_CNT		0x0208
#define QUP_INPUT_FIFO		0x0218

/* SPI mini-core register offsets */
#define SPI_CONFIG		0x0300
#define SPI_IO_CONTROL		0x0304
#define SPI_ERROR_FLAGS		0x0308
#define SPI_ERROR_FLAGS_EN	0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE		(1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
#define QUP_CONFIG_NO_INPUT		BIT(7)
#define QUP_CONFIG_NO_OUTPUT		BIT(6)
#define QUP_CONFIG_N			0x001f	/* bits-per-word minus one */

/* QUP_STATE fields */
#define QUP_STATE_VALID		BIT(2)
#define QUP_STATE_RESET		0
#define QUP_STATE_RUN		1
#define QUP_STATE_PAUSE		3
#define QUP_STATE_MASK		3
#define QUP_STATE_CLEAR		2

#define QUP_HW_VERSION_2_1_1	0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN	BIT(15)
#define QUP_IO_M_UNPACK_EN	BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

/* extractors for the read-only block/FIFO geometry fields */
#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)

#define QUP_IO_M_MODE_FIFO	0
#define QUP_IO_M_MODE_BLOCK	1
#define QUP_IO_M_MODE_DMOV	2
#define QUP_IO_M_MODE_BAM	3

/* QUP_OPERATIONAL fields */
#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
#define QUP_OP_IN_FIFO_FULL		BIT(7)
#define QUP_OP_OUT_FIFO_FULL		BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE		BIT(10)
#define SPI_CONFIG_INPUT_FIRST		BIT(9)
#define SPI_CONFIG_LOOPBACK		BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS		BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
#define SPI_IO_C_MX_CS_MODE		BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK		0x000c
#define SPI_IO_C_TRISTATE_CS		BIT(1)
#define SPI_IO_C_NO_TRI_STATE		BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)

#define SPI_NUM_CHIPSELECTS		4

/* high speed mode is when bus rate is greater than 26MHz */
#define SPI_HS_MIN_RATE			26000000
#define SPI_MAX_RATE			50000000

/* state-machine poll: sleep granularity (us) and retry budget */
#define SPI_DELAY_THRESHOLD		1
#define SPI_DELAY_RETRY			10
/*
 * Per-controller driver state, stored as the spi_master's devdata.
 *
 * xfer/error/tx_bytes/rx_bytes are shared between transfer_one() and the
 * IRQ handler; accesses to xfer and error are serialized by @lock.
 */
struct spi_qup {
	void __iomem	*base;		/* mapped QUP register block */
	struct device	*dev;
	struct clk	*cclk;		/* core clock */
	struct clk	*iclk;		/* interface clock */
	int		irq;
	spinlock_t	lock;		/* guards xfer/error handoff with IRQ */

	int		in_fifo_sz;	/* input FIFO size, bytes (probed) */
	int		out_fifo_sz;	/* output FIFO size, bytes (probed) */
	int		in_blk_sz;	/* input block size, bytes (probed) */
	int		out_blk_sz;	/* output block size, bytes (probed) */

	struct spi_transfer	*xfer;	/* in-flight transfer, NULL if idle */
	struct completion	done;	/* signalled by IRQ on finish/error */
	int		error;		/* -EIO on HW error, else 0 */
	int		w_size;		/* bytes per SPI word (1, 2 or 4) */
	int		tx_bytes;	/* bytes queued to output FIFO so far */
	int		rx_bytes;	/* bytes drained from input FIFO so far */
	int		qup_v1;		/* set for qcom,spi-qup-v1.1.1 parts */
};
  127. static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
  128. {
  129. u32 opstate = readl_relaxed(controller->base + QUP_STATE);
  130. return opstate & QUP_STATE_VALID;
  131. }
  132. static int spi_qup_set_state(struct spi_qup *controller, u32 state)
  133. {
  134. unsigned long loop;
  135. u32 cur_state;
  136. loop = 0;
  137. while (!spi_qup_is_valid_state(controller)) {
  138. usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
  139. if (++loop > SPI_DELAY_RETRY)
  140. return -EIO;
  141. }
  142. if (loop)
  143. dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
  144. loop, state);
  145. cur_state = readl_relaxed(controller->base + QUP_STATE);
  146. /*
  147. * Per spec: for PAUSE_STATE to RESET_STATE, two writes
  148. * of (b10) are required
  149. */
  150. if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
  151. (state == QUP_STATE_RESET)) {
  152. writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
  153. writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
  154. } else {
  155. cur_state &= ~QUP_STATE_MASK;
  156. cur_state |= state;
  157. writel_relaxed(cur_state, controller->base + QUP_STATE);
  158. }
  159. loop = 0;
  160. while (!spi_qup_is_valid_state(controller)) {
  161. usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
  162. if (++loop > SPI_DELAY_RETRY)
  163. return -EIO;
  164. }
  165. return 0;
  166. }
  167. static void spi_qup_fifo_read(struct spi_qup *controller,
  168. struct spi_transfer *xfer)
  169. {
  170. u8 *rx_buf = xfer->rx_buf;
  171. u32 word, state;
  172. int idx, shift, w_size;
  173. w_size = controller->w_size;
  174. while (controller->rx_bytes < xfer->len) {
  175. state = readl_relaxed(controller->base + QUP_OPERATIONAL);
  176. if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
  177. break;
  178. word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
  179. if (!rx_buf) {
  180. controller->rx_bytes += w_size;
  181. continue;
  182. }
  183. for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
  184. /*
  185. * The data format depends on bytes per SPI word:
  186. * 4 bytes: 0x12345678
  187. * 2 bytes: 0x00001234
  188. * 1 byte : 0x00000012
  189. */
  190. shift = BITS_PER_BYTE;
  191. shift *= (w_size - idx - 1);
  192. rx_buf[controller->rx_bytes] = word >> shift;
  193. }
  194. }
  195. }
  196. static void spi_qup_fifo_write(struct spi_qup *controller,
  197. struct spi_transfer *xfer)
  198. {
  199. const u8 *tx_buf = xfer->tx_buf;
  200. u32 word, state, data;
  201. int idx, w_size;
  202. w_size = controller->w_size;
  203. while (controller->tx_bytes < xfer->len) {
  204. state = readl_relaxed(controller->base + QUP_OPERATIONAL);
  205. if (state & QUP_OP_OUT_FIFO_FULL)
  206. break;
  207. word = 0;
  208. for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
  209. if (!tx_buf) {
  210. controller->tx_bytes += w_size;
  211. break;
  212. }
  213. data = tx_buf[controller->tx_bytes];
  214. word |= data << (BITS_PER_BYTE * (3 - idx));
  215. }
  216. writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
  217. }
  218. }
/*
 * QUP interrupt handler: acknowledges pending error/service flags,
 * services the FIFOs for the in-flight transfer, and completes the
 * transfer when all rx bytes have arrived or a hardware error is seen.
 */
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	struct spi_transfer *xfer;
	u32 opflags, qup_err, spi_err;
	unsigned long flags;
	int error = 0;

	/* take ownership of the transfer so transfer_one can't free it */
	spin_lock_irqsave(&controller->lock, flags);
	xfer = controller->xfer;
	controller->xfer = NULL;
	spin_unlock_irqrestore(&controller->lock, flags);

	/* read status, then ack by writing the same bits back (W1C) */
	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
	writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

	/* interrupt with no transfer in flight: ack'd above, just log */
	if (!xfer) {
		dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
				    qup_err, spi_err, opflags);
		return IRQ_HANDLED;
	}

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	/* service whichever FIFO raised the interrupt */
	if (opflags & QUP_OP_IN_SERVICE_FLAG)
		spi_qup_fifo_read(controller, xfer);

	if (opflags & QUP_OP_OUT_SERVICE_FLAG)
		spi_qup_fifo_write(controller, xfer);

	/* hand the transfer back before possibly completing */
	spin_lock_irqsave(&controller->lock, flags);
	controller->error = error;
	controller->xfer = xfer;
	spin_unlock_irqrestore(&controller->lock, flags);

	/* done when every rx byte has been drained, or on any error */
	if (controller->rx_bytes == xfer->len || error)
		complete(&controller->done);

	return IRQ_HANDLED;
}
/*
 * Program the controller for one transfer: set the core clock rate,
 * choose FIFO vs BLOCK I/O mode, and configure clock polarity/phase,
 * loopback, high-speed mode and bits-per-word.
 *
 * Returns 0 on success, -EIO on clock or state-machine failure.
 */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	u32 config, iomode, mode, control;
	int ret, n_words, w_size;

	/* loopback data never leaves the FIFO, so it must fit in one */
	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "too big size for loopback %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "fail to set frequency %d",
			xfer->speed_hz);
		return -EIO;
	}

	/* registers below may only be programmed in RESET state */
	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	/* map bits-per-word to a 1/2/4-byte FIFO word size */
	w_size = 4;
	if (xfer->bits_per_word <= 8)
		w_size = 1;
	else if (xfer->bits_per_word <= 16)
		w_size = 2;

	n_words = xfer->len / w_size;
	controller->w_size = w_size;

	if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
		mode = QUP_IO_M_MODE_FIFO;
		writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
	} else {
		mode = QUP_IO_M_MODE_BLOCK;
		writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode; packing stays disabled */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
	iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	control = readl_relaxed(controller->base + SPI_IO_CONTROL);

	if (spi->mode & SPI_CPOL)
		control |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		control &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(control, controller->base + SPI_IO_CONTROL);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	/* INPUT_FIRST means sample before first edge, i.e. CPHA=0 */
	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability for spi-clk high rates,
	 * but is invalid in loop back mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;	/* QUP_CONFIG_N field */
	config |= QUP_CONFIG_SPI_MODE;
	writel_relaxed(config, controller->base + QUP_CONFIG);

	/* only write to OPERATIONAL_MASK when register is present */
	if (!controller->qup_v1)
		writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
	return 0;
}
  355. static int spi_qup_transfer_one(struct spi_master *master,
  356. struct spi_device *spi,
  357. struct spi_transfer *xfer)
  358. {
  359. struct spi_qup *controller = spi_master_get_devdata(master);
  360. unsigned long timeout, flags;
  361. int ret = -EIO;
  362. ret = spi_qup_io_config(spi, xfer);
  363. if (ret)
  364. return ret;
  365. timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
  366. timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
  367. timeout = 100 * msecs_to_jiffies(timeout);
  368. reinit_completion(&controller->done);
  369. spin_lock_irqsave(&controller->lock, flags);
  370. controller->xfer = xfer;
  371. controller->error = 0;
  372. controller->rx_bytes = 0;
  373. controller->tx_bytes = 0;
  374. spin_unlock_irqrestore(&controller->lock, flags);
  375. if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
  376. dev_warn(controller->dev, "cannot set RUN state\n");
  377. goto exit;
  378. }
  379. if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
  380. dev_warn(controller->dev, "cannot set PAUSE state\n");
  381. goto exit;
  382. }
  383. spi_qup_fifo_write(controller, xfer);
  384. if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
  385. dev_warn(controller->dev, "cannot set EXECUTE state\n");
  386. goto exit;
  387. }
  388. if (!wait_for_completion_timeout(&controller->done, timeout))
  389. ret = -ETIMEDOUT;
  390. exit:
  391. spi_qup_set_state(controller, QUP_STATE_RESET);
  392. spin_lock_irqsave(&controller->lock, flags);
  393. controller->xfer = NULL;
  394. if (!ret)
  395. ret = controller->error;
  396. spin_unlock_irqrestore(&controller->lock, flags);
  397. return ret;
  398. }
/*
 * Probe: map registers, acquire clocks and IRQ, discover FIFO/block
 * geometry from QUP_IO_M_MODES, reset the controller to a known state,
 * and register the SPI master with runtime PM enabled.
 */
static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* This is optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	/* use num-cs unless not present or out of range */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		master->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		master->num_chipselect = num_cs;

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;

	platform_set_drvdata(pdev, master);

	controller = spi_master_get_devdata(master);
	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	/* set v1 flag if device is version 1 */
	if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
		controller->qup_v1 = 1;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

	/* decode read-only FIFO/block geometry; 0-encoded block size = 4B */
	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	/* FIFO depth is block size times 2^(size+1), per the encoding */
	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	/* full software reset, then bring the state machine to RESET */
	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);
	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* if earlier version of the QUP, disable INPUT_OVERRUN */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
			base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);
	return ret;
}
  530. #ifdef CONFIG_PM
  531. static int spi_qup_pm_suspend_runtime(struct device *device)
  532. {
  533. struct spi_master *master = dev_get_drvdata(device);
  534. struct spi_qup *controller = spi_master_get_devdata(master);
  535. u32 config;
  536. /* Enable clocks auto gaiting */
  537. config = readl(controller->base + QUP_CONFIG);
  538. config |= QUP_CONFIG_CLOCK_AUTO_GATE;
  539. writel_relaxed(config, controller->base + QUP_CONFIG);
  540. return 0;
  541. }
  542. static int spi_qup_pm_resume_runtime(struct device *device)
  543. {
  544. struct spi_master *master = dev_get_drvdata(device);
  545. struct spi_qup *controller = spi_master_get_devdata(master);
  546. u32 config;
  547. /* Disable clocks auto gaiting */
  548. config = readl_relaxed(controller->base + QUP_CONFIG);
  549. config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
  550. writel_relaxed(config, controller->base + QUP_CONFIG);
  551. return 0;
  552. }
  553. #endif /* CONFIG_PM */
  554. #ifdef CONFIG_PM_SLEEP
  555. static int spi_qup_suspend(struct device *device)
  556. {
  557. struct spi_master *master = dev_get_drvdata(device);
  558. struct spi_qup *controller = spi_master_get_devdata(master);
  559. int ret;
  560. ret = spi_master_suspend(master);
  561. if (ret)
  562. return ret;
  563. ret = spi_qup_set_state(controller, QUP_STATE_RESET);
  564. if (ret)
  565. return ret;
  566. clk_disable_unprepare(controller->cclk);
  567. clk_disable_unprepare(controller->iclk);
  568. return 0;
  569. }
  570. static int spi_qup_resume(struct device *device)
  571. {
  572. struct spi_master *master = dev_get_drvdata(device);
  573. struct spi_qup *controller = spi_master_get_devdata(master);
  574. int ret;
  575. ret = clk_prepare_enable(controller->iclk);
  576. if (ret)
  577. return ret;
  578. ret = clk_prepare_enable(controller->cclk);
  579. if (ret)
  580. return ret;
  581. ret = spi_qup_set_state(controller, QUP_STATE_RESET);
  582. if (ret)
  583. return ret;
  584. return spi_master_resume(master);
  585. }
  586. #endif /* CONFIG_PM_SLEEP */
  587. static int spi_qup_remove(struct platform_device *pdev)
  588. {
  589. struct spi_master *master = dev_get_drvdata(&pdev->dev);
  590. struct spi_qup *controller = spi_master_get_devdata(master);
  591. int ret;
  592. ret = pm_runtime_get_sync(&pdev->dev);
  593. if (ret < 0)
  594. return ret;
  595. ret = spi_qup_set_state(controller, QUP_STATE_RESET);
  596. if (ret)
  597. return ret;
  598. clk_disable_unprepare(controller->cclk);
  599. clk_disable_unprepare(controller->iclk);
  600. pm_runtime_put_noidle(&pdev->dev);
  601. pm_runtime_disable(&pdev->dev);
  602. return 0;
  603. }
/* OF match table; v1.1.1 parts take the qup_v1 quirks in probe(). */
static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v1.1.1", },
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
/* System sleep and runtime PM callbacks (compiled out without CONFIG_PM*). */
static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};
/* Platform driver glue and module metadata. */
static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table = spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");