i2c-at91.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904
  1. /*
  2. * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
  3. *
  4. * Copyright (C) 2011 Weinmann Medical GmbH
  5. * Author: Nikolaus Voss <n.voss@weinmann.de>
  6. *
  7. * Evolved from original work by:
  8. * Copyright (C) 2004 Rick Bronson
  9. * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
  10. *
  11. * Borrowed heavily from original work by:
  12. * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License as published by
  16. * the Free Software Foundation; either version 2 of the License, or
  17. * (at your option) any later version.
  18. */
  19. #include <linux/clk.h>
  20. #include <linux/completion.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/dmaengine.h>
  23. #include <linux/err.h>
  24. #include <linux/i2c.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/io.h>
  27. #include <linux/module.h>
  28. #include <linux/of.h>
  29. #include <linux/of_device.h>
  30. #include <linux/platform_device.h>
  31. #include <linux/slab.h>
  32. #include <linux/platform_data/dma-atmel.h>
  33. #include <linux/pm_runtime.h>
  34. #include <linux/pinctrl/consumer.h>
  35. #define DEFAULT_TWI_CLK_HZ 100000 /* max 400 Kbits/s */
  36. #define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
  37. #define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
  38. #define AUTOSUSPEND_TIMEOUT 2000
  39. /* AT91 TWI register definitions */
  40. #define AT91_TWI_CR 0x0000 /* Control Register */
  41. #define AT91_TWI_START 0x0001 /* Send a Start Condition */
  42. #define AT91_TWI_STOP 0x0002 /* Send a Stop Condition */
  43. #define AT91_TWI_MSEN 0x0004 /* Master Transfer Enable */
  44. #define AT91_TWI_SVDIS 0x0020 /* Slave Transfer Disable */
  45. #define AT91_TWI_QUICK 0x0040 /* SMBus quick command */
  46. #define AT91_TWI_SWRST 0x0080 /* Software Reset */
  47. #define AT91_TWI_MMR 0x0004 /* Master Mode Register */
  48. #define AT91_TWI_IADRSZ_1 0x0100 /* Internal Device Address Size */
  49. #define AT91_TWI_MREAD 0x1000 /* Master Read Direction */
  50. #define AT91_TWI_IADR 0x000c /* Internal Address Register */
  51. #define AT91_TWI_CWGR 0x0010 /* Clock Waveform Generator Reg */
  52. #define AT91_TWI_SR 0x0020 /* Status Register */
  53. #define AT91_TWI_TXCOMP 0x0001 /* Transmission Complete */
  54. #define AT91_TWI_RXRDY 0x0002 /* Receive Holding Register Ready */
  55. #define AT91_TWI_TXRDY 0x0004 /* Transmit Holding Register Ready */
  56. #define AT91_TWI_OVRE 0x0040 /* Overrun Error */
  57. #define AT91_TWI_UNRE 0x0080 /* Underrun Error */
  58. #define AT91_TWI_NACK 0x0100 /* Not Acknowledged */
  59. #define AT91_TWI_IER 0x0024 /* Interrupt Enable Register */
  60. #define AT91_TWI_IDR 0x0028 /* Interrupt Disable Register */
  61. #define AT91_TWI_IMR 0x002c /* Interrupt Mask Register */
  62. #define AT91_TWI_RHR 0x0030 /* Receive Holding Register */
  63. #define AT91_TWI_THR 0x0034 /* Transmit Holding Register */
/*
 * Per-SoC configuration.  clk_max_div/clk_offset feed the clock divider
 * calculation in at91_calc_twi_clock(); has_unre_flag says whether this
 * SoC reports transmit underrun (AT91_TWI_UNRE) in the status register.
 */
struct at91_twi_pdata {
	unsigned clk_max_div;		/* max CKDIV field value for CWGR */
	unsigned clk_offset;		/* divider offset in the clock formula */
	bool has_unre_flag;		/* SR exposes the underrun flag */
	struct at_dma_slave dma_slave;
};
/* DMA state for the controller; one transfer direction is active at a time. */
struct at91_twi_dma {
	struct dma_chan *chan_rx;	/* "rx" slave channel */
	struct dma_chan *chan_tx;	/* "tx" slave channel */
	struct scatterlist sg;		/* single-entry list for the msg buffer */
	struct dma_async_tx_descriptor *data_desc;	/* NOTE(review): never assigned in this file */
	enum dma_data_direction direction;	/* direction of current transfer */
	bool buf_mapped;		/* buffer currently DMA-mapped */
	bool xfer_in_progress;		/* descriptor submitted, not yet torn down */
};
/* Per-controller driver state. */
struct at91_twi_dev {
	struct device *dev;
	void __iomem *base;		/* mapped TWI register block */
	struct completion cmd_complete;	/* completed on TXCOMP in the IRQ handler */
	struct clk *clk;		/* peripheral clock */
	u8 *buf;			/* current position in the message buffer */
	size_t buf_len;			/* bytes still to transfer */
	struct i2c_msg *msg;		/* message currently being transferred */
	int irq;
	unsigned imr;			/* IMR bits saved by at91_twi_irq_save() */
	unsigned transfer_status;	/* SR bits accumulated by the IRQ handler */
	struct i2c_adapter adapter;
	unsigned twi_cwgr_reg;		/* precomputed CWGR value (clock waveform) */
	struct at91_twi_pdata *pdata;	/* per-SoC configuration */
	bool use_dma;			/* DMA channels successfully configured */
	bool recv_len_abort;		/* aborting a bad SMBus block-length read */
	struct at91_twi_dma dma;
};
  97. static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
  98. {
  99. return readl_relaxed(dev->base + reg);
  100. }
  101. static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
  102. {
  103. writel_relaxed(val, dev->base + reg);
  104. }
  105. static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
  106. {
  107. at91_twi_write(dev, AT91_TWI_IDR,
  108. AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
  109. }
/*
 * Save the currently enabled data interrupts (the low three IMR bits:
 * TXCOMP/RXRDY/TXRDY) and mask them.  Paired with at91_twi_irq_restore().
 */
static void at91_twi_irq_save(struct at91_twi_dev *dev)
{
	dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
	at91_disable_twi_interrupts(dev);
}
/* Re-enable the interrupt bits previously latched by at91_twi_irq_save(). */
static void at91_twi_irq_restore(struct at91_twi_dev *dev)
{
	at91_twi_write(dev, AT91_TWI_IER, dev->imr);
}
/*
 * Reset the controller into master mode and program the precomputed clock
 * waveform.  Also used by at91_do_twi_transfer() to recover after a timeout.
 */
static void at91_init_twi_bus(struct at91_twi_dev *dev)
{
	at91_disable_twi_interrupts(dev);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST);	/* software reset */
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);	/* master mode on */
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);	/* slave mode off */
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);
}
/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 *
 * The result is cached in dev->twi_cwgr_reg and written to CWGR by
 * at91_init_twi_bus().
 */
static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
{
	int ckdiv, cdiv, div;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;
	int max_ckdiv = pdata->clk_max_div;

	/* Total divider needed for the requested rate, clamped to >= 0. */
	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * twi_clk) - offset);
	/* Split div into cdiv * 2^ckdiv with cdiv fitting in 8 bits. */
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;

	if (ckdiv > max_ckdiv) {
		/* Requested rate unreachable: run as slow as the HW allows. */
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	/* CKDIV at bits [18:16]; same cdiv for high and low phase (symmetric). */
	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv;
	dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv);
}
/*
 * Abort any in-flight DMA transfer and unmap the buffer.  Called from the
 * transfer error path and from the DMA setup error paths; the data
 * interrupts are masked while tearing down.
 */
static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_all(dma->chan_rx);
		else
			dmaengine_terminate_all(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}
/*
 * Push the next byte of dev->buf into THR and advance the buffer; a STOP
 * condition is requested together with the last byte.
 * NOTE(review): dev_dbg uses %d for the size_t buf_len — should be %zu.
 */
static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (dev->buf_len <= 0)
		return;

	at91_twi_write(dev, AT91_TWI_THR, *dev->buf);

	/* send stop when last byte has been written */
	if (--dev->buf_len == 0)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);

	++dev->buf;
}
/*
 * DMA TX completion callback: unmap the buffer, then request STOP.  The
 * TXCOMP interrupt (enabled by at91_do_twi_transfer) ends the transfer.
 */
static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
			 dev->buf_len, DMA_TO_DEVICE);

	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}
  186. static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
  187. {
  188. dma_addr_t dma_addr;
  189. struct dma_async_tx_descriptor *txdesc;
  190. struct at91_twi_dma *dma = &dev->dma;
  191. struct dma_chan *chan_tx = dma->chan_tx;
  192. if (dev->buf_len <= 0)
  193. return;
  194. dma->direction = DMA_TO_DEVICE;
  195. at91_twi_irq_save(dev);
  196. dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
  197. DMA_TO_DEVICE);
  198. if (dma_mapping_error(dev->dev, dma_addr)) {
  199. dev_err(dev->dev, "dma map failed\n");
  200. return;
  201. }
  202. dma->buf_mapped = true;
  203. at91_twi_irq_restore(dev);
  204. sg_dma_len(&dma->sg) = dev->buf_len;
  205. sg_dma_address(&dma->sg) = dma_addr;
  206. txdesc = dmaengine_prep_slave_sg(chan_tx, &dma->sg, 1, DMA_MEM_TO_DEV,
  207. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  208. if (!txdesc) {
  209. dev_err(dev->dev, "dma prep slave sg failed\n");
  210. goto error;
  211. }
  212. txdesc->callback = at91_twi_write_data_dma_callback;
  213. txdesc->callback_param = dev;
  214. dma->xfer_in_progress = true;
  215. dmaengine_submit(txdesc);
  216. dma_async_issue_pending(chan_tx);
  217. return;
  218. error:
  219. at91_twi_dma_cleanup(dev);
  220. }
/*
 * Pull one byte from RHR into dev->buf.  For I2C_M_RECV_LEN (SMBus block
 * read) the first received byte patches the remaining length; an invalid
 * length byte triggers an abort where one extra byte is read just to
 * generate the STOP.  STOP is scheduled one byte before the end.
 */
static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	if (dev->buf_len <= 0)
		return;

	*dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff;
	--dev->buf_len;

	/* return if aborting, we only needed to read RHR to clear RXRDY*/
	if (dev->recv_len_abort)
		return;

	/* handle I2C_SMBUS_BLOCK_DATA */
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			dev->msg->flags &= ~I2C_M_RECV_LEN;
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %d\n",
				dev->buf_len);
		} else {
			/* abort and send the stop by reading one more byte */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}

	/* send stop if second but last byte has been read */
	if (dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);

	++dev->buf;
}
/*
 * DMA RX completion callback: unmap the buffer, then hand the final two
 * bytes over to PIO by enabling RXRDY (see at91_twi_read_data_dma()).
 */
static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
			 dev->buf_len, DMA_FROM_DEVICE);

	/* The last two bytes have to be read without using dma */
	dev->buf += dev->buf_len - 2;
	dev->buf_len = 2;
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
}
  261. static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
  262. {
  263. dma_addr_t dma_addr;
  264. struct dma_async_tx_descriptor *rxdesc;
  265. struct at91_twi_dma *dma = &dev->dma;
  266. struct dma_chan *chan_rx = dma->chan_rx;
  267. dma->direction = DMA_FROM_DEVICE;
  268. /* Keep in mind that we won't use dma to read the last two bytes */
  269. at91_twi_irq_save(dev);
  270. dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len - 2,
  271. DMA_FROM_DEVICE);
  272. if (dma_mapping_error(dev->dev, dma_addr)) {
  273. dev_err(dev->dev, "dma map failed\n");
  274. return;
  275. }
  276. dma->buf_mapped = true;
  277. at91_twi_irq_restore(dev);
  278. dma->sg.dma_address = dma_addr;
  279. sg_dma_len(&dma->sg) = dev->buf_len - 2;
  280. rxdesc = dmaengine_prep_slave_sg(chan_rx, &dma->sg, 1, DMA_DEV_TO_MEM,
  281. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  282. if (!rxdesc) {
  283. dev_err(dev->dev, "dma prep slave sg failed\n");
  284. goto error;
  285. }
  286. rxdesc->callback = at91_twi_read_data_dma_callback;
  287. rxdesc->callback_param = dev;
  288. dma->xfer_in_progress = true;
  289. dmaengine_submit(rxdesc);
  290. dma_async_issue_pending(dma->chan_rx);
  291. return;
  292. error:
  293. at91_twi_dma_cleanup(dev);
  294. }
/*
 * IRQ handler.  Only bits set in both SR and IMR are acted on.  Note the
 * else-if chain: at most one of RXRDY/TXRDY is serviced per interrupt.
 * All SR bits are accumulated into transfer_status so error flags (NACK,
 * OVRE, UNRE) can be inspected after completion.
 */
static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;
	else if (irqstatus & AT91_TWI_RXRDY)
		at91_twi_read_next_byte(dev);
	else if (irqstatus & AT91_TWI_TXRDY)
		at91_twi_write_next_byte(dev);

	/* catch error flags */
	dev->transfer_status |= status;

	/* TXCOMP ends the transfer (also set on NACK). */
	if (irqstatus & AT91_TWI_TXCOMP) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	}

	return IRQ_HANDLED;
}
/*
 * Execute the transfer described by dev->msg/buf/buf_len (set up by
 * at91_twi_xfer()): start PIO or DMA, wait for TXCOMP, then translate the
 * accumulated status flags into an errno.
 *
 * Returns 0 on success, -ETIMEDOUT, -EREMOTEIO (NACK), -EIO (over/underrun)
 * or -EPROTO (invalid SMBus block length).
 */
static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	bool has_unre_flag = dev->pdata->has_unre_flag;

	dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	if (!dev->buf_len) {
		/* Zero-length message: SMBus quick command, wait for TXCOMP. */
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* Drain a stale byte: reading RHR clears RXRDY. */
		if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
			dev_err(dev->dev, "RXRDY still set!");
			at91_twi_read(dev, AT91_TWI_RHR);
		}

		/* if only one byte is to be read, immediately stop transfer */
		if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma, the last byte has to be read manually in
		 * order to not send the stop command too late and then
		 * to receive extra data. In practice, there are some issues
		 * if you use the dma to read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the two last ones manually
		 * seems to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_read_data_dma(dev);
			/*
			 * It is important to enable TXCOMP irq here because
			 * doing it only when transferring the last two bytes
			 * will mask NACK errors since TXCOMP is set when a
			 * NACK occurs.
			 */
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP);
		} else
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write_data_dma(dev);
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
		} else {
			/* PIO write: queue the first byte, then drive on TXRDY. */
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
		}
	}

	ret = wait_for_completion_timeout(&dev->cmd_complete,
					  dev->adapter.timeout);
	if (ret == 0) {
		/* Timed out: reset the controller back to a sane state. */
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	at91_twi_dma_cleanup(dev);
	return ret;
}
/*
 * i2c_algorithm.master_xfer handler.  The hardware can concatenate at most
 * two messages via its internal-address feature: a first write of up to
 * three bytes is packed into IADR, and the second message becomes the
 * actual data transfer.  Returns the message count on success, or a
 * negative errno.
 */
static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);
	int ret;
	unsigned int_addr_flag = 0;
	struct i2c_msg *m_start = msg;

	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	/*
	 * The hardware can handle at most two messages concatenated by a
	 * repeated start via it's internal address feature.
	 */
	if (num > 2) {
		dev_err(dev->dev,
			"cannot handle more than two concatenated messages.\n");
		/* NOTE(review): returns 0 ("no messages done"), not an errno. */
		ret = 0;
		goto out;
	} else if (num == 2) {
		int internal_address = 0;
		int i;

		if (msg->flags & I2C_M_RD) {
			dev_err(dev->dev, "first transfer must be write.\n");
			ret = -EINVAL;
			goto out;
		}
		if (msg->len > 3) {
			dev_err(dev->dev, "first message size must be <= 3.\n");
			ret = -EINVAL;
			goto out;
		}

		/* 1st msg is put into the internal address, start with 2nd */
		m_start = &msg[1];
		for (i = 0; i < msg->len; ++i) {
			/* Pack bytes so msg->buf[0] ends up most significant. */
			const unsigned addr = msg->buf[msg->len - 1 - i];

			internal_address |= addr << (8 * i);
			/* Each byte bumps the IADRSZ field by one. */
			int_addr_flag += AT91_TWI_IADRSZ_1;
		}
		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
	}

	at91_twi_write(dev, AT91_TWI_MMR, (m_start->addr << 16) | int_addr_flag
		       | ((m_start->flags & I2C_M_RD) ? AT91_TWI_MREAD : 0));

	dev->buf_len = m_start->len;
	dev->buf = m_start->buf;
	dev->msg = m_start;
	dev->recv_len_abort = false;

	ret = at91_do_twi_transfer(dev);

	ret = (ret < 0) ? ret : num;
out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}
  454. static u32 at91_twi_func(struct i2c_adapter *adapter)
  455. {
  456. return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
  457. | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
  458. }
  459. static struct i2c_algorithm at91_twi_algorithm = {
  460. .master_xfer = at91_twi_xfer,
  461. .functionality = at91_twi_func,
  462. };
/*
 * Per-SoC TWI configurations.  clk_max_div/clk_offset are consumed by
 * at91_calc_twi_clock(); only the rm9200 exposes the underrun flag.
 */
static struct at91_twi_pdata at91rm9200_config = {
	.clk_max_div = 5,
	.clk_offset = 3,
	.has_unre_flag = true,
};

static struct at91_twi_pdata at91sam9261_config = {
	.clk_max_div = 5,
	.clk_offset = 4,
	.has_unre_flag = false,
};

static struct at91_twi_pdata at91sam9260_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
};

static struct at91_twi_pdata at91sam9g20_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
};

static struct at91_twi_pdata at91sam9g10_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
};
/* Legacy (non-DT) platform-device id table; driver_data is the SoC config. */
static const struct platform_device_id at91_twi_devtypes[] = {
	{
		.name = "i2c-at91rm9200",
		.driver_data = (unsigned long) &at91rm9200_config,
	}, {
		.name = "i2c-at91sam9261",
		.driver_data = (unsigned long) &at91sam9261_config,
	}, {
		.name = "i2c-at91sam9260",
		.driver_data = (unsigned long) &at91sam9260_config,
	}, {
		.name = "i2c-at91sam9g20",
		.driver_data = (unsigned long) &at91sam9g20_config,
	}, {
		.name = "i2c-at91sam9g10",
		.driver_data = (unsigned long) &at91sam9g10_config,
	}, {
		/* sentinel */
	}
};
#if defined(CONFIG_OF)
/* sam9x5 family: same dividers as sam9260, no underrun flag. */
static struct at91_twi_pdata at91sam9x5_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
};

/* Device-tree match table; .data points at the per-SoC configuration. */
static const struct of_device_id atmel_twi_dt_ids[] = {
	{
		.compatible = "atmel,at91rm9200-i2c",
		.data = &at91rm9200_config,
	} , {
		.compatible = "atmel,at91sam9260-i2c",
		.data = &at91sam9260_config,
	} , {
		.compatible = "atmel,at91sam9261-i2c",
		.data = &at91sam9261_config,
	} , {
		.compatible = "atmel,at91sam9g20-i2c",
		.data = &at91sam9g20_config,
	} , {
		.compatible = "atmel,at91sam9g10-i2c",
		.data = &at91sam9g10_config,
	}, {
		.compatible = "atmel,at91sam9x5-i2c",
		.data = &at91sam9x5_config,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
#endif
/*
 * Request and configure the "tx"/"rx" DMA slave channels for the data
 * registers of the controller at MMIO base @phy_addr.  On success sets
 * dev->use_dma and returns 0.  On failure releases whatever was acquired
 * and returns a negative errno; -EPROBE_DEFER is not logged so probe can
 * silently retry later.
 */
static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;

	memset(&slave_config, 0, sizeof(slave_config));
	/* Byte-wide, single-beat accesses to RHR (reads) and THR (writes). */
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma->chan_tx = dma_request_slave_channel_reason(dev->dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		dma->chan_tx = NULL;
		goto error;
	}

	dma->chan_rx = dma_request_slave_channel_reason(dev->dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		dma->chan_rx = NULL;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	sg_init_table(&dma->sg, 1);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;
	dev->use_dma = true;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
	return ret;

error:
	if (ret != -EPROBE_DEFER)
		dev_info(dev->dev, "can't use DMA, error %d\n", ret);
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}
  592. static struct at91_twi_pdata *at91_twi_get_driver_data(
  593. struct platform_device *pdev)
  594. {
  595. if (pdev->dev.of_node) {
  596. const struct of_device_id *match;
  597. match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
  598. if (!match)
  599. return NULL;
  600. return (struct at91_twi_pdata *)match->data;
  601. }
  602. return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
  603. }
  604. static int at91_twi_probe(struct platform_device *pdev)
  605. {
  606. struct at91_twi_dev *dev;
  607. struct resource *mem;
  608. int rc;
  609. u32 phy_addr;
  610. u32 bus_clk_rate;
  611. dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
  612. if (!dev)
  613. return -ENOMEM;
  614. init_completion(&dev->cmd_complete);
  615. dev->dev = &pdev->dev;
  616. mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  617. if (!mem)
  618. return -ENODEV;
  619. phy_addr = mem->start;
  620. dev->pdata = at91_twi_get_driver_data(pdev);
  621. if (!dev->pdata)
  622. return -ENODEV;
  623. dev->base = devm_ioremap_resource(&pdev->dev, mem);
  624. if (IS_ERR(dev->base))
  625. return PTR_ERR(dev->base);
  626. dev->irq = platform_get_irq(pdev, 0);
  627. if (dev->irq < 0)
  628. return dev->irq;
  629. rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
  630. dev_name(dev->dev), dev);
  631. if (rc) {
  632. dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
  633. return rc;
  634. }
  635. platform_set_drvdata(pdev, dev);
  636. dev->clk = devm_clk_get(dev->dev, NULL);
  637. if (IS_ERR(dev->clk)) {
  638. dev_err(dev->dev, "no clock defined\n");
  639. return -ENODEV;
  640. }
  641. clk_prepare_enable(dev->clk);
  642. if (dev->dev->of_node) {
  643. rc = at91_twi_configure_dma(dev, phy_addr);
  644. if (rc == -EPROBE_DEFER)
  645. return rc;
  646. }
  647. rc = of_property_read_u32(dev->dev->of_node, "clock-frequency",
  648. &bus_clk_rate);
  649. if (rc)
  650. bus_clk_rate = DEFAULT_TWI_CLK_HZ;
  651. at91_calc_twi_clock(dev, bus_clk_rate);
  652. at91_init_twi_bus(dev);
  653. snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
  654. i2c_set_adapdata(&dev->adapter, dev);
  655. dev->adapter.owner = THIS_MODULE;
  656. dev->adapter.class = I2C_CLASS_DEPRECATED;
  657. dev->adapter.algo = &at91_twi_algorithm;
  658. dev->adapter.dev.parent = dev->dev;
  659. dev->adapter.nr = pdev->id;
  660. dev->adapter.timeout = AT91_I2C_TIMEOUT;
  661. dev->adapter.dev.of_node = pdev->dev.of_node;
  662. pm_runtime_set_autosuspend_delay(dev->dev, AUTOSUSPEND_TIMEOUT);
  663. pm_runtime_use_autosuspend(dev->dev);
  664. pm_runtime_set_active(dev->dev);
  665. pm_runtime_enable(dev->dev);
  666. rc = i2c_add_numbered_adapter(&dev->adapter);
  667. if (rc) {
  668. dev_err(dev->dev, "Adapter %s registration failed\n",
  669. dev->adapter.name);
  670. clk_disable_unprepare(dev->clk);
  671. pm_runtime_disable(dev->dev);
  672. pm_runtime_set_suspended(dev->dev);
  673. return rc;
  674. }
  675. dev_info(dev->dev, "AT91 i2c bus driver.\n");
  676. return 0;
  677. }
/* Unbind: deregister the adapter, then release clock and runtime-PM state. */
static int at91_twi_remove(struct platform_device *pdev)
{
	struct at91_twi_dev *dev = platform_get_drvdata(pdev);

	i2c_del_adapter(&dev->adapter);
	clk_disable_unprepare(dev->clk);

	pm_runtime_disable(dev->dev);
	pm_runtime_set_suspended(dev->dev);

	return 0;
}
#ifdef CONFIG_PM
/* Runtime suspend: gate the peripheral clock and park the pins. */
static int at91_twi_runtime_suspend(struct device *dev)
{
	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);

	clk_disable_unprepare(twi_dev->clk);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}
/* Runtime resume: restore the pinmux, then re-enable the peripheral clock. */
static int at91_twi_runtime_resume(struct device *dev)
{
	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);

	pinctrl_pm_select_default_state(dev);

	return clk_prepare_enable(twi_dev->clk);
}
/* System sleep: power down unless runtime PM already suspended the device. */
static int at91_twi_suspend_noirq(struct device *dev)
{
	if (!pm_runtime_status_suspended(dev))
		at91_twi_runtime_suspend(dev);

	return 0;
}
/*
 * System resume: undo at91_twi_suspend_noirq(), then let autosuspend idle
 * the controller again after AUTOSUSPEND_TIMEOUT.
 */
static int at91_twi_resume_noirq(struct device *dev)
{
	int ret;

	if (!pm_runtime_status_suspended(dev)) {
		ret = at91_twi_runtime_resume(dev);
		if (ret)
			return ret;
	}

	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
/*
 * The noirq system-sleep hooks reuse the runtime PM callbacks so the
 * controller is powered down for system sleep even when runtime-active.
 */
static const struct dev_pm_ops at91_twi_pm = {
	.suspend_noirq = at91_twi_suspend_noirq,
	.resume_noirq = at91_twi_resume_noirq,
	.runtime_suspend = at91_twi_runtime_suspend,
	.runtime_resume = at91_twi_runtime_resume,
};

#define at91_twi_pm_ops (&at91_twi_pm)
#else
#define at91_twi_pm_ops NULL
#endif
/* Platform driver glue: supports both DT and legacy id-table binding. */
static struct platform_driver at91_twi_driver = {
	.probe = at91_twi_probe,
	.remove = at91_twi_remove,
	.id_table = at91_twi_devtypes,
	.driver = {
		.name = "at91_i2c",
		.of_match_table = of_match_ptr(atmel_twi_dt_ids),
		.pm = at91_twi_pm_ops,
	},
};
/* Module init: register the platform driver (see subsys_initcall below). */
static int __init at91_twi_init(void)
{
	return platform_driver_register(&at91_twi_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit at91_twi_exit(void)
{
	platform_driver_unregister(&at91_twi_driver);
}

/*
 * Registered via subsys_initcall rather than module_init — presumably so
 * the i2c bus is available before client drivers probe; TODO confirm.
 */
subsys_initcall(at91_twi_init);
module_exit(at91_twi_exit);
  749. MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
  750. MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
  751. MODULE_LICENSE("GPL");
  752. MODULE_ALIAS("platform:at91_i2c");