spi-qup.c

/*
 * Copyright (c) 2008-2014, The Linux foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License rev 2 and
 * only rev 2 as published by the free Software foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
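
/* QUP and SPI register offsets, relative to the mapped controller base. */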
#define QUP_CONFIG 0x0000
#define QUP_STATE 0x0004
#define QUP_IO_M_MODES 0x0008
#define QUP_SW_RESET 0x000c
#define QUP_OPERATIONAL 0x0018
#define QUP_ERROR_FLAGS 0x001c
#define QUP_ERROR_FLAGS_EN 0x0020
#define QUP_OPERATIONAL_MASK 0x0028
#define QUP_HW_VERSION 0x0030
#define QUP_MX_OUTPUT_CNT 0x0100
#define QUP_OUTPUT_FIFO 0x0110
#define QUP_MX_WRITE_CNT 0x0150
#define QUP_MX_INPUT_CNT 0x0200
#define QUP_MX_READ_CNT 0x0208
#define QUP_INPUT_FIFO 0x0218

#define SPI_CONFIG 0x0300
#define SPI_IO_CONTROL 0x0304
#define SPI_ERROR_FLAGS 0x0308
#define SPI_ERROR_FLAGS_EN 0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE (1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
#define QUP_CONFIG_NO_INPUT BIT(7)
#define QUP_CONFIG_NO_OUTPUT BIT(6)
#define QUP_CONFIG_N 0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID BIT(2)
#define QUP_STATE_RESET 0
#define QUP_STATE_RUN 1
#define QUP_STATE_PAUSE 3
#define QUP_STATE_MASK 3
#define QUP_STATE_CLEAR 2

#define QUP_HW_VERSION_2_1_1 0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN BIT(15)
#define QUP_IO_M_UNPACK_EN BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)

#define QUP_IO_M_MODE_FIFO 0
#define QUP_IO_M_MODE_BLOCK 1
#define QUP_IO_M_MODE_DMOV 2
#define QUP_IO_M_MODE_BAM 3

/* QUP_OPERATIONAL fields */
#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
#define QUP_OP_IN_SERVICE_FLAG BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
#define QUP_OP_IN_FIFO_FULL BIT(7)
#define QUP_OP_OUT_FIFO_FULL BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE BIT(10)
#define SPI_CONFIG_INPUT_FIRST BIT(9)
#define SPI_CONFIG_LOOPBACK BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
#define SPI_IO_C_MX_CS_MODE BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK 0x000c
#define SPI_IO_C_TRISTATE_CS BIT(1)
#define SPI_IO_C_NO_TRI_STATE BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN BIT(0)

#define SPI_NUM_CHIPSELECTS 4

/* high speed mode is when the bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE 26000000
#define SPI_MAX_RATE 50000000

#define SPI_DELAY_THRESHOLD 1
#define SPI_DELAY_RETRY 10
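
/*
 * Per-controller driver state. The current transfer and its byte counters
 * are shared with the interrupt handler and are protected by 'lock'.
 */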
struct spi_qup {
        void __iomem *base;
        struct device *dev;
        struct clk *cclk;       /* core clock */
        struct clk *iclk;       /* interface clock */
        int irq;
        spinlock_t lock;

        int in_fifo_sz;
        int out_fifo_sz;
        int in_blk_sz;
        int out_blk_sz;

        struct spi_transfer *xfer;
        struct completion done;
        int error;
        int w_size;             /* bytes per SPI word */
        int tx_bytes;
        int rx_bytes;
};
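
/* True when the QUP state machine reports a stable (valid) state. */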
static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
        u32 opstate = readl_relaxed(controller->base + QUP_STATE);

        return opstate & QUP_STATE_VALID;
}
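
/*
 * Move the QUP state machine to @state: wait for the hardware to report a
 * valid state, write the new state (PAUSE -> RESET takes two CLEAR writes,
 * per the spec note below), then wait for validity again. Each wait is
 * bounded by SPI_DELAY_RETRY short sleeps.
 */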
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
        unsigned long loop;
        u32 cur_state;

        loop = 0;
        while (!spi_qup_is_valid_state(controller)) {

                usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

                if (++loop > SPI_DELAY_RETRY)
                        return -EIO;
        }

        if (loop)
                dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
                        loop, state);

        cur_state = readl_relaxed(controller->base + QUP_STATE);
        /*
         * Per spec: for PAUSE_STATE to RESET_STATE, two writes
         * of (b10) are required
         */
        if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
            (state == QUP_STATE_RESET)) {
                writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
                writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
        } else {
                cur_state &= ~QUP_STATE_MASK;
                cur_state |= state;
                writel_relaxed(cur_state, controller->base + QUP_STATE);
        }

        loop = 0;
        while (!spi_qup_is_valid_state(controller)) {

                usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

                if (++loop > SPI_DELAY_RETRY)
                        return -EIO;
        }

        return 0;
}
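
/*
 * Drain the input FIFO into xfer->rx_buf until the transfer is complete or
 * the FIFO runs empty. Each FIFO word is split into up to w_size bytes as
 * described by the inline data-format comment; with no rx_buf the words are
 * read and discarded so the FIFO still empties.
 */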
static void spi_qup_fifo_read(struct spi_qup *controller,
                              struct spi_transfer *xfer)
{
        u8 *rx_buf = xfer->rx_buf;
        u32 word, state;
        int idx, shift, w_size;

        w_size = controller->w_size;

        while (controller->rx_bytes < xfer->len) {

                state = readl_relaxed(controller->base + QUP_OPERATIONAL);
                if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
                        break;

                word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

                if (!rx_buf) {
                        controller->rx_bytes += w_size;
                        continue;
                }

                for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
                        /*
                         * The data format depends on bytes per SPI word:
                         *  4 bytes: 0x12345678
                         *  2 bytes: 0x00001234
                         *  1 byte : 0x00000012
                         */
                        shift = BITS_PER_BYTE;
                        shift *= (w_size - idx - 1);
                        rx_buf[controller->rx_bytes] = word >> shift;
                }
        }
}
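
/*
 * Fill the output FIFO from xfer->tx_buf until the whole transfer has been
 * queued or the FIFO reports full. With no tx_buf, zero words are queued so
 * a receive-only transfer still generates clock cycles.
 */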
static void spi_qup_fifo_write(struct spi_qup *controller,
                               struct spi_transfer *xfer)
{
        const u8 *tx_buf = xfer->tx_buf;
        u32 word, state, data;
        int idx, w_size;

        w_size = controller->w_size;

        while (controller->tx_bytes < xfer->len) {

                state = readl_relaxed(controller->base + QUP_OPERATIONAL);
                if (state & QUP_OP_OUT_FIFO_FULL)
                        break;

                word = 0;
                for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {

                        if (!tx_buf) {
                                controller->tx_bytes += w_size;
                                break;
                        }

                        data = tx_buf[controller->tx_bytes];
                        word |= data << (BITS_PER_BYTE * (3 - idx));
                }

                writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
        }
}
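
/*
 * QUP interrupt handler: snapshot and clear the QUP/SPI error and
 * operational flags, service whichever FIFO raised the interrupt, and
 * complete the transfer once every rx byte has arrived or an error was
 * flagged.
 */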
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
        struct spi_qup *controller = dev_id;
        struct spi_transfer *xfer;
        u32 opflags, qup_err, spi_err;
        unsigned long flags;
        int error = 0;

        spin_lock_irqsave(&controller->lock, flags);
        xfer = controller->xfer;
        controller->xfer = NULL;
        spin_unlock_irqrestore(&controller->lock, flags);

        qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
        spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
        opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

        writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
        writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
        writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

        if (!xfer) {
                dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
                                    qup_err, spi_err, opflags);
                return IRQ_HANDLED;
        }

        if (qup_err) {
                if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
                        dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
                if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
                        dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
                if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
                        dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
                if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
                        dev_warn(controller->dev, "INPUT_OVER_RUN\n");

                error = -EIO;
        }

        if (spi_err) {
                if (spi_err & SPI_ERROR_CLK_OVER_RUN)
                        dev_warn(controller->dev, "CLK_OVER_RUN\n");
                if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
                        dev_warn(controller->dev, "CLK_UNDER_RUN\n");

                error = -EIO;
        }

        if (opflags & QUP_OP_IN_SERVICE_FLAG)
                spi_qup_fifo_read(controller, xfer);

        if (opflags & QUP_OP_OUT_SERVICE_FLAG)
                spi_qup_fifo_write(controller, xfer);

        spin_lock_irqsave(&controller->lock, flags);
        controller->error = error;
        controller->xfer = xfer;
        spin_unlock_irqrestore(&controller->lock, flags);

        if (controller->rx_bytes == xfer->len || error)
                complete(&controller->done);

        return IRQ_HANDLED;
}

/*
 * Configure the controller for one transfer: clock rate, FIFO/block mode,
 * SPI mode bits and bits per word.
 */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
        struct spi_qup *controller = spi_master_get_devdata(spi->master);
        u32 config, iomode, mode;
        int ret, n_words, w_size;

        if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
                dev_err(controller->dev, "transfer too large for loopback: %d > %d\n",
                        xfer->len, controller->in_fifo_sz);
                return -EIO;
        }

        ret = clk_set_rate(controller->cclk, xfer->speed_hz);
        if (ret) {
                dev_err(controller->dev, "failed to set clock frequency %d\n",
                        xfer->speed_hz);
                return -EIO;
        }

        if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
                dev_err(controller->dev, "cannot set RESET state\n");
                return -EIO;
        }

        w_size = 4;
        if (xfer->bits_per_word <= 8)
                w_size = 1;
        else if (xfer->bits_per_word <= 16)
                w_size = 2;

        n_words = xfer->len / w_size;
        controller->w_size = w_size;

        if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
                mode = QUP_IO_M_MODE_FIFO;
                writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
                writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
                /* must be zero for FIFO */
                writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
                writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
        } else {
                mode = QUP_IO_M_MODE_BLOCK;
                writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
                writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
                /* must be zero for BLOCK and BAM */
                writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
                writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
        }

        iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
        /* Set input and output transfer mode */
        iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
        iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
        iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
        iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

        writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

        config = readl_relaxed(controller->base + SPI_CONFIG);

        if (spi->mode & SPI_LOOP)
                config |= SPI_CONFIG_LOOPBACK;
        else
                config &= ~SPI_CONFIG_LOOPBACK;

        if (spi->mode & SPI_CPHA)
                config &= ~SPI_CONFIG_INPUT_FIRST;
        else
                config |= SPI_CONFIG_INPUT_FIRST;

        /*
         * HS_MODE improves signal stability for spi-clk high rates,
         * but is invalid in loop back mode.
         */
        if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
                config |= SPI_CONFIG_HS_MODE;
        else
                config &= ~SPI_CONFIG_HS_MODE;

        writel_relaxed(config, controller->base + SPI_CONFIG);

        config = readl_relaxed(controller->base + QUP_CONFIG);
        config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
        config |= xfer->bits_per_word - 1;
        config |= QUP_CONFIG_SPI_MODE;
        writel_relaxed(config, controller->base + QUP_CONFIG);

        writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
        return 0;
}
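
/*
 * Issue one transfer: configure the controller, pre-load the output FIFO
 * while the state machine is paused, then run and wait for the interrupt
 * handler to signal completion, bounded by a timeout scaled to the
 * transfer length and clock rate.
 */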
static int spi_qup_transfer_one(struct spi_master *master,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
{
        struct spi_qup *controller = spi_master_get_devdata(master);
        unsigned long timeout, flags;
        int ret;

        ret = spi_qup_io_config(spi, xfer);
        if (ret)
                return ret;

        timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
        timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
        timeout = 100 * msecs_to_jiffies(timeout);

        reinit_completion(&controller->done);

        spin_lock_irqsave(&controller->lock, flags);
        controller->xfer = xfer;
        controller->error = 0;
        controller->rx_bytes = 0;
        controller->tx_bytes = 0;
        spin_unlock_irqrestore(&controller->lock, flags);

        if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
                dev_warn(controller->dev, "cannot set RUN state\n");
                ret = -EIO;
                goto exit;
        }

        if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
                dev_warn(controller->dev, "cannot set PAUSE state\n");
                ret = -EIO;
                goto exit;
        }

        spi_qup_fifo_write(controller, xfer);

        if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
                dev_warn(controller->dev, "cannot set EXECUTE state\n");
                ret = -EIO;
                goto exit;
        }

        if (!wait_for_completion_timeout(&controller->done, timeout))
                ret = -ETIMEDOUT;
exit:
        spi_qup_set_state(controller, QUP_STATE_RESET);
        spin_lock_irqsave(&controller->lock, flags);
        controller->xfer = NULL;
        if (!ret)
                ret = controller->error;
        spin_unlock_irqrestore(&controller->lock, flags);
        return ret;
}
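
/*
 * Probe: map the QUP registers, claim the core/iface clocks, check the
 * hardware revision, read back the block/FIFO geometry, reset the core
 * into a known state and register the SPI master.
 */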
static int spi_qup_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct clk *iclk, *cclk;
        struct spi_qup *controller;
        struct resource *res;
        struct device *dev;
        void __iomem *base;
        u32 data, max_freq, iomode;
        int ret, irq, size;

        dev = &pdev->dev;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        cclk = devm_clk_get(dev, "core");
        if (IS_ERR(cclk))
                return PTR_ERR(cclk);

        iclk = devm_clk_get(dev, "iface");
        if (IS_ERR(iclk))
                return PTR_ERR(iclk);

        /* spi-max-frequency is an optional property */
        if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
                max_freq = SPI_MAX_RATE;

        if (!max_freq || max_freq > SPI_MAX_RATE) {
                dev_err(dev, "invalid clock frequency %d\n", max_freq);
                return -ENXIO;
        }

        ret = clk_prepare_enable(cclk);
        if (ret) {
                dev_err(dev, "cannot enable core clock\n");
                return ret;
        }

        ret = clk_prepare_enable(iclk);
        if (ret) {
                clk_disable_unprepare(cclk);
                dev_err(dev, "cannot enable iface clock\n");
                return ret;
        }

        data = readl_relaxed(base + QUP_HW_VERSION);

        if (data < QUP_HW_VERSION_2_1_1) {
                clk_disable_unprepare(cclk);
                clk_disable_unprepare(iclk);
                dev_err(dev, "v.%08x is not supported\n", data);
                return -ENXIO;
        }

        master = spi_alloc_master(dev, sizeof(struct spi_qup));
        if (!master) {
                clk_disable_unprepare(cclk);
                clk_disable_unprepare(iclk);
                dev_err(dev, "cannot allocate master\n");
                return -ENOMEM;
        }

        /* use num-cs unless it is not present or out of range */
        if (of_property_read_u16(dev->of_node, "num-cs",
                                 &master->num_chipselect) ||
            (master->num_chipselect > SPI_NUM_CHIPSELECTS))
                master->num_chipselect = SPI_NUM_CHIPSELECTS;

        master->bus_num = pdev->id;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
        master->max_speed_hz = max_freq;
        master->transfer_one = spi_qup_transfer_one;
        master->dev.of_node = pdev->dev.of_node;
        master->auto_runtime_pm = true;

        platform_set_drvdata(pdev, master);

        controller = spi_master_get_devdata(master);
        controller->dev = dev;
        controller->base = base;
        controller->iclk = iclk;
        controller->cclk = cclk;
        controller->irq = irq;

        spin_lock_init(&controller->lock);
        init_completion(&controller->done);
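
        /*
         * The I/O modes register reports the block and FIFO geometry chosen
         * when the core was integrated; decode it so spi_qup_io_config() can
         * tell whether a transfer fits in FIFO mode.
         */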
        iomode = readl_relaxed(base + QUP_IO_M_MODES);

        size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
        if (size)
                controller->out_blk_sz = size * 16;
        else
                controller->out_blk_sz = 4;

        size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
        if (size)
                controller->in_blk_sz = size * 16;
        else
                controller->in_blk_sz = 4;

        size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
        controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

        size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
        controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

        dev_info(dev, "v.%08x IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
                 data, controller->in_blk_sz, controller->in_fifo_sz,
                 controller->out_blk_sz, controller->out_fifo_sz);

        writel_relaxed(1, base + QUP_SW_RESET);

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret) {
                dev_err(dev, "cannot set RESET state\n");
                goto error;
        }

        writel_relaxed(0, base + QUP_OPERATIONAL);
        writel_relaxed(0, base + QUP_IO_M_MODES);
        writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
        writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
                       base + SPI_ERROR_FLAGS_EN);

        writel_relaxed(0, base + SPI_CONFIG);
        writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

        ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
                               IRQF_TRIGGER_HIGH, pdev->name, controller);
        if (ret)
                goto error;

        pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);

        ret = devm_spi_register_master(dev, master);
        if (ret)
                goto disable_pm;

        return 0;

disable_pm:
        pm_runtime_disable(&pdev->dev);
error:
        clk_disable_unprepare(cclk);
        clk_disable_unprepare(iclk);
        spi_master_put(master);
        return ret;
}
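
/*
 * Runtime PM only toggles the QUP clock auto-gating bit; the core and
 * iface clocks themselves stay prepared and enabled.
 */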
#ifdef CONFIG_PM_RUNTIME
static int spi_qup_pm_suspend_runtime(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        u32 config;

        /* Enable clock auto-gating */
        config = readl(controller->base + QUP_CONFIG);
        config |= QUP_CONFIG_CLOCK_AUTO_GATE;
        writel_relaxed(config, controller->base + QUP_CONFIG);
        return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        u32 config;

        /* Disable clock auto-gating */
        config = readl_relaxed(controller->base + QUP_CONFIG);
        config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
        writel_relaxed(config, controller->base + QUP_CONFIG);
        return 0;
}
#endif /* CONFIG_PM_RUNTIME */
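
/*
 * System sleep: quiesce the state machine and fully gate the clocks;
 * resume re-enables the clocks and leaves the core in RESET.
 */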
#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = spi_master_suspend(master);
        if (ret)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        clk_disable_unprepare(controller->cclk);
        clk_disable_unprepare(controller->iclk);
        return 0;
}

static int spi_qup_resume(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = clk_prepare_enable(controller->iclk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(controller->cclk);
        if (ret)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */
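
/*
 * Remove: resume the device so the state machine can be put back into
 * RESET, then release the clocks and drop the runtime PM references.
 */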
static int spi_qup_remove(struct platform_device *pdev)
{
        struct spi_master *master = dev_get_drvdata(&pdev->dev);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        clk_disable_unprepare(controller->cclk);
        clk_disable_unprepare(controller->iclk);

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
}
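
/*
 * For reference, a sketch of a devicetree node this driver could bind to.
 * The unit address, reg, interrupt and clock specifiers below are
 * board-specific placeholders (assumptions, not taken from this file);
 * only the compatible strings, the "core"/"iface" clock names and the
 * spi-max-frequency and num-cs properties are what the code parses.
 *
 *      spi@f9924000 {
 *              compatible = "qcom,spi-qup-v2.2.1";
 *              reg = <0xf9924000 0x1000>;
 *              interrupts = <0 96 0>;
 *              clocks = <&gcc 123>, <&gcc 124>;
 *              clock-names = "core", "iface";
 *              spi-max-frequency = <25000000>;
 *              num-cs = <1>;
 *      };
 */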
static const struct of_device_id spi_qup_dt_match[] = {
        { .compatible = "qcom,spi-qup-v2.1.1", },
        { .compatible = "qcom,spi-qup-v2.2.1", },
        { }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
        SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
                           spi_qup_pm_resume_runtime,
                           NULL)
};

static struct platform_driver spi_qup_driver = {
        .driver = {
                .name = "spi_qup",
                .owner = THIS_MODULE,
                .pm = &spi_qup_dev_pm_ops,
                .of_match_table = spi_qup_dt_match,
        },
        .probe = spi_qup_probe,
        .remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");