spi-qup.c

/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License rev 2 and
 * only rev 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#define QUP_CONFIG 0x0000
#define QUP_STATE 0x0004
#define QUP_IO_M_MODES 0x0008
#define QUP_SW_RESET 0x000c
#define QUP_OPERATIONAL 0x0018
#define QUP_ERROR_FLAGS 0x001c
#define QUP_ERROR_FLAGS_EN 0x0020
#define QUP_OPERATIONAL_MASK 0x0028
#define QUP_HW_VERSION 0x0030
#define QUP_MX_OUTPUT_CNT 0x0100
#define QUP_OUTPUT_FIFO 0x0110
#define QUP_MX_WRITE_CNT 0x0150
#define QUP_MX_INPUT_CNT 0x0200
#define QUP_MX_READ_CNT 0x0208
#define QUP_INPUT_FIFO 0x0218

#define SPI_CONFIG 0x0300
#define SPI_IO_CONTROL 0x0304
#define SPI_ERROR_FLAGS 0x0308
#define SPI_ERROR_FLAGS_EN 0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE (1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
#define QUP_CONFIG_NO_INPUT BIT(7)
#define QUP_CONFIG_NO_OUTPUT BIT(6)
#define QUP_CONFIG_N 0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID BIT(2)
#define QUP_STATE_RESET 0
#define QUP_STATE_RUN 1
#define QUP_STATE_PAUSE 3
#define QUP_STATE_MASK 3
#define QUP_STATE_CLEAR 2

#define QUP_HW_VERSION_2_1_1 0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN BIT(15)
#define QUP_IO_M_UNPACK_EN BIT(14)

#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)

#define QUP_IO_M_MODE_FIFO 0
#define QUP_IO_M_MODE_BLOCK 1
#define QUP_IO_M_MODE_DMOV 2
#define QUP_IO_M_MODE_BAM 3

/* QUP_OPERATIONAL fields */
#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
#define QUP_OP_IN_SERVICE_FLAG BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
#define QUP_OP_IN_FIFO_FULL BIT(7)
#define QUP_OP_OUT_FIFO_FULL BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE BIT(10)
#define SPI_CONFIG_INPUT_FIRST BIT(9)
#define SPI_CONFIG_LOOPBACK BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
#define SPI_IO_C_MX_CS_MODE BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK 0x000c
#define SPI_IO_C_TRISTATE_CS BIT(1)
#define SPI_IO_C_NO_TRI_STATE BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN BIT(0)

#define SPI_NUM_CHIPSELECTS 4

/* High-speed mode is used when the bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE 26000000
#define SPI_MAX_RATE 50000000

#define SPI_DELAY_THRESHOLD 1
#define SPI_DELAY_RETRY 10

struct spi_qup {
        void __iomem *base;
        struct device *dev;
        struct clk *cclk;       /* core clock */
        struct clk *iclk;       /* interface clock */
        int irq;
        spinlock_t lock;

        int in_fifo_sz;
        int out_fifo_sz;
        int in_blk_sz;
        int out_blk_sz;

        struct spi_transfer *xfer;
        struct completion done;
        int error;
        int w_size;             /* bytes per SPI word */
        int tx_bytes;
        int rx_bytes;
        int qup_v1;
};
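
/* Returns true once the QUP state machine reports a settled (valid) state. */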
static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
        u32 opstate = readl_relaxed(controller->base + QUP_STATE);

        return opstate & QUP_STATE_VALID;
}
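
/*
 * Move the QUP state machine to the requested state (RESET/RUN/PAUSE),
 * waiting for the hardware to report a valid state before and after the
 * transition.
 */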
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
        unsigned long loop;
        u32 cur_state;

        loop = 0;
        while (!spi_qup_is_valid_state(controller)) {
                usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

                if (++loop > SPI_DELAY_RETRY)
                        return -EIO;
        }

        if (loop)
                dev_dbg(controller->dev, "invalid state for %ld us, state %d\n",
                        loop, state);

        cur_state = readl_relaxed(controller->base + QUP_STATE);
        /*
         * Per spec: for PAUSE_STATE to RESET_STATE, two writes
         * of (b10) are required
         */
        if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
            (state == QUP_STATE_RESET)) {
                writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
                writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
        } else {
                cur_state &= ~QUP_STATE_MASK;
                cur_state |= state;
                writel_relaxed(cur_state, controller->base + QUP_STATE);
        }

        loop = 0;
        while (!spi_qup_is_valid_state(controller)) {
                usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

                if (++loop > SPI_DELAY_RETRY)
                        return -EIO;
        }

        return 0;
}
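
/*
 * Drain the input FIFO into xfer->rx_buf, unpacking w_size bytes from each
 * 32-bit FIFO word, until the FIFO is empty or all expected bytes have been
 * read.
 */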
static void spi_qup_fifo_read(struct spi_qup *controller,
                              struct spi_transfer *xfer)
{
        u8 *rx_buf = xfer->rx_buf;
        u32 word, state;
        int idx, shift, w_size;

        w_size = controller->w_size;

        while (controller->rx_bytes < xfer->len) {

                state = readl_relaxed(controller->base + QUP_OPERATIONAL);
                if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
                        break;

                word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

                if (!rx_buf) {
                        controller->rx_bytes += w_size;
                        continue;
                }

                for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
                        /*
                         * The data format depends on bytes per SPI word:
                         * 4 bytes: 0x12345678
                         * 2 bytes: 0x00001234
                         * 1 byte : 0x00000012
                         */
                        shift = BITS_PER_BYTE;
                        shift *= (w_size - idx - 1);
                        rx_buf[controller->rx_bytes] = word >> shift;
                }
        }
}
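
/*
 * Fill the output FIFO from xfer->tx_buf, packing w_size bytes into each
 * 32-bit FIFO word, until the FIFO is full or all bytes have been queued.
 */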
static void spi_qup_fifo_write(struct spi_qup *controller,
                               struct spi_transfer *xfer)
{
        const u8 *tx_buf = xfer->tx_buf;
        u32 word, state, data;
        int idx, w_size;

        w_size = controller->w_size;

        while (controller->tx_bytes < xfer->len) {

                state = readl_relaxed(controller->base + QUP_OPERATIONAL);
                if (state & QUP_OP_OUT_FIFO_FULL)
                        break;

                word = 0;
                for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {

                        if (!tx_buf) {
                                controller->tx_bytes += w_size;
                                break;
                        }

                        data = tx_buf[controller->tx_bytes];
                        word |= data << (BITS_PER_BYTE * (3 - idx));
                }

                writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
        }
}
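
/*
 * Interrupt handler: acknowledge the error and service flags, log any error
 * condition, service the FIFOs, and complete the transfer once all receive
 * data has arrived or an error was detected.
 */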
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
        struct spi_qup *controller = dev_id;
        struct spi_transfer *xfer;
        u32 opflags, qup_err, spi_err;
        unsigned long flags;
        int error = 0;

        spin_lock_irqsave(&controller->lock, flags);
        xfer = controller->xfer;
        controller->xfer = NULL;
        spin_unlock_irqrestore(&controller->lock, flags);

        qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
        spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
        opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

        writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
        writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
        writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

        if (!xfer) {
                dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
                                    qup_err, spi_err, opflags);
                return IRQ_HANDLED;
        }

        if (qup_err) {
                if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
                        dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
                if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
                        dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
                if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
                        dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
                if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
                        dev_warn(controller->dev, "INPUT_OVER_RUN\n");

                error = -EIO;
        }

        if (spi_err) {
                if (spi_err & SPI_ERROR_CLK_OVER_RUN)
                        dev_warn(controller->dev, "CLK_OVER_RUN\n");
                if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
                        dev_warn(controller->dev, "CLK_UNDER_RUN\n");

                error = -EIO;
        }

        if (opflags & QUP_OP_IN_SERVICE_FLAG)
                spi_qup_fifo_read(controller, xfer);

        if (opflags & QUP_OP_OUT_SERVICE_FLAG)
                spi_qup_fifo_write(controller, xfer);

        spin_lock_irqsave(&controller->lock, flags);
        controller->error = error;
        controller->xfer = xfer;
        spin_unlock_irqrestore(&controller->lock, flags);

        if (controller->rx_bytes == xfer->len || error)
                complete(&controller->done);

        return IRQ_HANDLED;
}

/* set clock freq ... bits per word */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
        struct spi_qup *controller = spi_master_get_devdata(spi->master);
        u32 config, iomode, mode;
        int ret, n_words, w_size;

        if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
                dev_err(controller->dev, "too big size for loopback %d > %d\n",
                        xfer->len, controller->in_fifo_sz);
                return -EIO;
        }

        ret = clk_set_rate(controller->cclk, xfer->speed_hz);
        if (ret) {
                dev_err(controller->dev, "fail to set frequency %d",
                        xfer->speed_hz);
                return -EIO;
        }

        if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
                dev_err(controller->dev, "cannot set RESET state\n");
                return -EIO;
        }

        w_size = 4;
        if (xfer->bits_per_word <= 8)
                w_size = 1;
        else if (xfer->bits_per_word <= 16)
                w_size = 2;

        n_words = xfer->len / w_size;
        controller->w_size = w_size;

        if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
                mode = QUP_IO_M_MODE_FIFO;
                writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
                writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
                /* must be zero for FIFO */
                writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
                writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
        } else {
                mode = QUP_IO_M_MODE_BLOCK;
                writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
                writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
                /* must be zero for BLOCK and BAM */
                writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
                writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
        }

        iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
        /* Set input and output transfer mode */
        iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
        iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
        iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
        iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

        writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

        config = readl_relaxed(controller->base + SPI_CONFIG);

        if (spi->mode & SPI_LOOP)
                config |= SPI_CONFIG_LOOPBACK;
        else
                config &= ~SPI_CONFIG_LOOPBACK;

        if (spi->mode & SPI_CPHA)
                config &= ~SPI_CONFIG_INPUT_FIRST;
        else
                config |= SPI_CONFIG_INPUT_FIRST;

        /*
         * HS_MODE improves signal stability for spi-clk high rates,
         * but is invalid in loop back mode.
         */
        if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
                config |= SPI_CONFIG_HS_MODE;
        else
                config &= ~SPI_CONFIG_HS_MODE;

        writel_relaxed(config, controller->base + SPI_CONFIG);

        config = readl_relaxed(controller->base + QUP_CONFIG);
        config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
        config |= xfer->bits_per_word - 1;
        config |= QUP_CONFIG_SPI_MODE;
        writel_relaxed(config, controller->base + QUP_CONFIG);

        /* only write to OPERATIONAL_MASK when register is present */
        if (!controller->qup_v1)
                writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);

        return 0;
}
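
/*
 * Run a single transfer in interrupt-driven mode: configure the controller,
 * preload the output FIFO while the state machine is paused, then switch to
 * RUN and wait (with a timeout scaled to the transfer length and clock rate)
 * for the interrupt handler to signal completion.
 */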
static int spi_qup_transfer_one(struct spi_master *master,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
{
        struct spi_qup *controller = spi_master_get_devdata(master);
        unsigned long timeout, flags;
        int ret;

        ret = spi_qup_io_config(spi, xfer);
        if (ret)
                return ret;

        timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
        timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
        timeout = 100 * msecs_to_jiffies(timeout);

        reinit_completion(&controller->done);

        spin_lock_irqsave(&controller->lock, flags);
        controller->xfer = xfer;
        controller->error = 0;
        controller->rx_bytes = 0;
        controller->tx_bytes = 0;
        spin_unlock_irqrestore(&controller->lock, flags);

        if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
                dev_warn(controller->dev, "cannot set RUN state\n");
                ret = -EIO;
                goto exit;
        }

        if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
                dev_warn(controller->dev, "cannot set PAUSE state\n");
                ret = -EIO;
                goto exit;
        }

        spi_qup_fifo_write(controller, xfer);

        if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
                dev_warn(controller->dev, "cannot set EXECUTE state\n");
                ret = -EIO;
                goto exit;
        }

        if (!wait_for_completion_timeout(&controller->done, timeout))
                ret = -ETIMEDOUT;
exit:
        spi_qup_set_state(controller, QUP_STATE_RESET);
        spin_lock_irqsave(&controller->lock, flags);
        controller->xfer = NULL;
        if (!ret)
                ret = controller->error;
        spin_unlock_irqrestore(&controller->lock, flags);

        return ret;
}
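
/*
 * Probe: map the register block, enable the core and interface clocks, read
 * the FIFO/block geometry advertised in QUP_IO_M_MODES, reset the controller
 * and register the SPI master.
 */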
static int spi_qup_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct clk *iclk, *cclk;
        struct spi_qup *controller;
        struct resource *res;
        struct device *dev;
        void __iomem *base;
        u32 max_freq, iomode;
        int ret, irq, size;

        dev = &pdev->dev;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        cclk = devm_clk_get(dev, "core");
        if (IS_ERR(cclk))
                return PTR_ERR(cclk);

        iclk = devm_clk_get(dev, "iface");
        if (IS_ERR(iclk))
                return PTR_ERR(iclk);

        /* This is an optional property */
        if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
                max_freq = SPI_MAX_RATE;

        if (!max_freq || max_freq > SPI_MAX_RATE) {
                dev_err(dev, "invalid clock frequency %d\n", max_freq);
                return -ENXIO;
        }

        ret = clk_prepare_enable(cclk);
        if (ret) {
                dev_err(dev, "cannot enable core clock\n");
                return ret;
        }

        ret = clk_prepare_enable(iclk);
        if (ret) {
                clk_disable_unprepare(cclk);
                dev_err(dev, "cannot enable iface clock\n");
                return ret;
        }

        master = spi_alloc_master(dev, sizeof(struct spi_qup));
        if (!master) {
                clk_disable_unprepare(cclk);
                clk_disable_unprepare(iclk);
                dev_err(dev, "cannot allocate master\n");
                return -ENOMEM;
        }

        /* use num-cs from the device tree unless it is absent or out of range */
        if (of_property_read_u16(dev->of_node, "num-cs",
                                 &master->num_chipselect) ||
            (master->num_chipselect > SPI_NUM_CHIPSELECTS))
                master->num_chipselect = SPI_NUM_CHIPSELECTS;

        master->bus_num = pdev->id;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
        master->max_speed_hz = max_freq;
        master->transfer_one = spi_qup_transfer_one;
        master->dev.of_node = pdev->dev.of_node;
        master->auto_runtime_pm = true;

        platform_set_drvdata(pdev, master);

        controller = spi_master_get_devdata(master);

        controller->dev = dev;
        controller->base = base;
        controller->iclk = iclk;
        controller->cclk = cclk;
        controller->irq = irq;

        /* set the v1 flag if this is a version 1 QUP */
        if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
                controller->qup_v1 = 1;

        spin_lock_init(&controller->lock);
        init_completion(&controller->done);

        iomode = readl_relaxed(base + QUP_IO_M_MODES);

        size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
        if (size)
                controller->out_blk_sz = size * 16;
        else
                controller->out_blk_sz = 4;

        size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
        if (size)
                controller->in_blk_sz = size * 16;
        else
                controller->in_blk_sz = 4;

        size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
        controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

        size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
        controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

        dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
                 controller->in_blk_sz, controller->in_fifo_sz,
                 controller->out_blk_sz, controller->out_fifo_sz);

        writel_relaxed(1, base + QUP_SW_RESET);

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret) {
                dev_err(dev, "cannot set RESET state\n");
                goto error;
        }

        writel_relaxed(0, base + QUP_OPERATIONAL);
        writel_relaxed(0, base + QUP_IO_M_MODES);
        if (!controller->qup_v1)
                writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
        writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
                       base + SPI_ERROR_FLAGS_EN);

        /* if this is an earlier version of the QUP, disable INPUT_OVERRUN */
        if (controller->qup_v1)
                writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
                               QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
                               base + QUP_ERROR_FLAGS_EN);

        writel_relaxed(0, base + SPI_CONFIG);
        writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

        ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
                               IRQF_TRIGGER_HIGH, pdev->name, controller);
        if (ret)
                goto error;

        pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);

        ret = devm_spi_register_master(dev, master);
        if (ret)
                goto disable_pm;

        return 0;

disable_pm:
        pm_runtime_disable(&pdev->dev);
error:
        clk_disable_unprepare(cclk);
        clk_disable_unprepare(iclk);
        spi_master_put(master);
        return ret;
}

#ifdef CONFIG_PM_RUNTIME
static int spi_qup_pm_suspend_runtime(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        u32 config;

        /* Enable clock auto-gating */
        config = readl(controller->base + QUP_CONFIG);
        config |= QUP_CONFIG_CLOCK_AUTO_GATE;
        writel_relaxed(config, controller->base + QUP_CONFIG);
        return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        u32 config;

        /* Disable clock auto-gating */
        config = readl_relaxed(controller->base + QUP_CONFIG);
        config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
        writel_relaxed(config, controller->base + QUP_CONFIG);
        return 0;
}
#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = spi_master_suspend(master);
        if (ret)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        clk_disable_unprepare(controller->cclk);
        clk_disable_unprepare(controller->iclk);
        return 0;
}

static int spi_qup_resume(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = clk_prepare_enable(controller->iclk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(controller->cclk);
        if (ret)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */
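
/* Put the controller back into RESET and release the clocks and runtime PM. */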
static int spi_qup_remove(struct platform_device *pdev)
{
        struct spi_master *master = dev_get_drvdata(&pdev->dev);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        clk_disable_unprepare(controller->cclk);
        clk_disable_unprepare(controller->iclk);

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
}

static const struct of_device_id spi_qup_dt_match[] = {
        { .compatible = "qcom,spi-qup-v1.1.1", },
        { .compatible = "qcom,spi-qup-v2.1.1", },
        { .compatible = "qcom,spi-qup-v2.2.1", },
        { }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
        SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
                           spi_qup_pm_resume_runtime,
                           NULL)
};

static struct platform_driver spi_qup_driver = {
        .driver = {
                .name = "spi_qup",
                .owner = THIS_MODULE,
                .pm = &spi_qup_dev_pm_ops,
                .of_match_table = spi_qup_dt_match,
        },
        .probe = spi_qup_probe,
        .remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");