/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of.h>

#include "rmi_driver.h"

#define RMI_SPI_DEFAULT_XFER_BUF_SIZE	64

#define RMI_PAGE_SELECT_REGISTER	0x00FF
#define RMI_SPI_PAGE(addr)		(((addr) >> 8) & 0x80)
#define RMI_SPI_XFER_SIZE_LIMIT		255

#define BUFFER_SIZE_INCREMENT	32
enum rmi_spi_op {
	RMI_SPI_WRITE = 0,
	RMI_SPI_READ,
	RMI_SPI_V2_READ_UNIFIED,
	RMI_SPI_V2_READ_SPLIT,
	RMI_SPI_V2_WRITE,
};

struct rmi_spi_cmd {
	enum rmi_spi_op op;
	u16 addr;
};
struct rmi_spi_xport {
	struct rmi_transport_dev xport;
	struct spi_device *spi;

	/* Serializes page-select writes against block accesses. */
	struct mutex page_mutex;
	/* Currently selected RMI page (see rmi_set_page()). */
	int page;

	/* Shared DMA-safe bounce buffers; tx_buf follows rx_buf. */
	u8 *rx_buf;
	u8 *tx_buf;
	int xfer_buf_size;

	/* spi_transfer pools for devices that need per-byte delays. */
	struct spi_transfer *rx_xfers;
	struct spi_transfer *tx_xfers;
	int rx_xfer_count;
	int tx_xfer_count;
};
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
	struct spi_device *spi = rmi_spi->spi;
	int buf_size = rmi_spi->xfer_buf_size
		? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
	struct spi_transfer *xfer_buf;
	void *buf;
	void *tmp;

	while (buf_size < len)
		buf_size *= 2;

	if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
		buf_size = RMI_SPI_XFER_SIZE_LIMIT;

	tmp = rmi_spi->rx_buf;
	buf = devm_kzalloc(&spi->dev, buf_size * 2,
				GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	rmi_spi->rx_buf = buf;
	rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
	rmi_spi->xfer_buf_size = buf_size;

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	if (rmi_spi->xport.pdata.spi_data.read_delay_us)
		rmi_spi->rx_xfer_count = buf_size;
	else
		rmi_spi->rx_xfer_count = 1;

	if (rmi_spi->xport.pdata.spi_data.write_delay_us)
		rmi_spi->tx_xfer_count = buf_size;
	else
		rmi_spi->tx_xfer_count = 1;

	/*
	 * Allocate a pool of spi_transfer buffers for devices which need
	 * per byte delays.
	 */
	tmp = rmi_spi->rx_xfers;
	xfer_buf = devm_kzalloc(&spi->dev,
		(rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count)
		* sizeof(struct spi_transfer), GFP_KERNEL);
	if (!xfer_buf)
		return -ENOMEM;

	rmi_spi->rx_xfers = xfer_buf;
	rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	return 0;
}
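
/*
 * rmi_spi_xfer - assemble and run a single SPI message for one RMI command.
 *
 * For RMI_SPI_WRITE/RMI_SPI_READ the payload is preceded by a two byte
 * header: the high address byte (bit 7 set for reads) followed by the low
 * address byte. The V2 opcodes reserve a four byte header instead. If a
 * per-byte read or write delay is configured, the message is built from one
 * single-byte spi_transfer per byte so that delay_usecs is applied after
 * each byte; otherwise one transfer covers the whole buffer. For example, a
 * two byte read from address 0x0004 puts { 0x80, 0x04 } on the wire before
 * clocking in the data.
 */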
static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
			const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
			int tx_len, u8 *rx_buf, int rx_len)
{
	struct spi_device *spi = rmi_spi->spi;
	struct rmi_device_platform_data_spi *spi_data =
				&rmi_spi->xport.pdata.spi_data;
	struct spi_message msg;
	struct spi_transfer *xfer;
	int ret = 0;
	int len;
	int cmd_len = 0;
	int total_tx_len;
	int i;
	u16 addr = cmd->addr;

	spi_message_init(&msg);

	switch (cmd->op) {
	case RMI_SPI_WRITE:
	case RMI_SPI_READ:
		cmd_len += 2;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
	case RMI_SPI_V2_READ_SPLIT:
	case RMI_SPI_V2_WRITE:
		cmd_len += 4;
		break;
	}

	total_tx_len = cmd_len + tx_len;
	len = max(total_tx_len, rx_len);

	if (len > RMI_SPI_XFER_SIZE_LIMIT)
		return -EINVAL;

	if (rmi_spi->xfer_buf_size < len) {
		ret = rmi_spi_manage_pools(rmi_spi, len);
		if (ret < 0)
			return ret;
	}

	if (addr == 0)
		/*
		 * SPI needs an address. Use 0x7FF if we want to keep
		 * reading from the last position of the register pointer.
		 */
		addr = 0x7FF;

	switch (cmd->op) {
	case RMI_SPI_WRITE:
		rmi_spi->tx_buf[0] = (addr >> 8);
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_READ:
		rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
		break;
	case RMI_SPI_V2_READ_SPLIT:
		break;
	case RMI_SPI_V2_WRITE:
		rmi_spi->tx_buf[0] = 0x40;
		rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
		rmi_spi->tx_buf[2] = addr & 0xFF;
		rmi_spi->tx_buf[3] = tx_len;
		break;
	}

	if (tx_buf)
		memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);

	if (rmi_spi->tx_xfer_count > 1) {
		for (i = 0; i < total_tx_len; i++) {
			xfer = &rmi_spi->tx_xfers[i];
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->tx_buf = &rmi_spi->tx_buf[i];
			xfer->len = 1;
			xfer->delay_usecs = spi_data->write_delay_us;
			spi_message_add_tail(xfer, &msg);
		}
	} else {
		xfer = rmi_spi->tx_xfers;
		memset(xfer, 0, sizeof(struct spi_transfer));
		xfer->tx_buf = rmi_spi->tx_buf;
		xfer->len = total_tx_len;
		spi_message_add_tail(xfer, &msg);
	}

	rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
		__func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
		total_tx_len, total_tx_len, rmi_spi->tx_buf);

	if (rx_buf) {
		if (rmi_spi->rx_xfer_count > 1) {
			for (i = 0; i < rx_len; i++) {
				xfer = &rmi_spi->rx_xfers[i];
				memset(xfer, 0, sizeof(struct spi_transfer));
				xfer->rx_buf = &rmi_spi->rx_buf[i];
				xfer->len = 1;
				xfer->delay_usecs = spi_data->read_delay_us;
				spi_message_add_tail(xfer, &msg);
			}
		} else {
			xfer = rmi_spi->rx_xfers;
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->rx_buf = rmi_spi->rx_buf;
			xfer->len = rx_len;
			spi_message_add_tail(xfer, &msg);
		}
	}

	ret = spi_sync(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
		return ret;
	}

	if (rx_buf) {
		memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
		rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
			__func__, rx_len, rx_len, rx_buf);
	}

	return 0;
}
/*
 * rmi_set_page - Set RMI page
 * @rmi_spi: The pointer to the rmi_spi_xport struct
 * @page: The new page address.
 *
 * RMI devices have 16-bit addressing, but some of the transport
 * implementations (like SMBus) only have 8-bit addressing. So RMI implements
 * a page address at 0xff of every page so that we can reliably address
 * every 256 registers.
 *
 * The page_mutex lock must be held when this function is entered.
 *
 * Returns zero on success, non-zero on failure.
 */
static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
{
	struct rmi_spi_cmd cmd;
	int ret;

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = RMI_PAGE_SELECT_REGISTER;

	ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);

	/* Only cache the new page once the page select write has succeeded. */
	if (!ret)
		rmi_spi->page = page;

	return ret;
}
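
/*
 * The block accessors below only switch pages when the target address lies
 * outside the currently cached page: RMI_SPI_PAGE(addr) is compared against
 * rmi_spi->page under page_mutex, and rmi_set_page() is issued first when
 * they differ, so consecutive accesses within one page cost a single SPI
 * message each.
 */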
static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
			       const void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
			      void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_READ;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

static const struct rmi_transport_ops rmi_spi_ops = {
	.write_block	= rmi_spi_write_block,
	.read_block	= rmi_spi_read_block,
};
#ifdef CONFIG_OF
static int rmi_spi_of_probe(struct spi_device *spi,
			struct rmi_device_platform_data *pdata)
{
	struct device *dev = &spi->dev;
	int retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.read_delay_us,
			"spi-rx-delay-us", 1);
	if (retval)
		return retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.write_delay_us,
			"spi-tx-delay-us", 1);
	if (retval)
		return retval;

	return 0;
}

static const struct of_device_id rmi_spi_of_match[] = {
	{ .compatible = "syna,rmi4-spi" },
	{},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
#else
static inline int rmi_spi_of_probe(struct spi_device *spi,
				struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif
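
/*
 * When probed from device tree, the optional "spi-rx-delay-us" and
 * "spi-tx-delay-us" properties populate read_delay_us/write_delay_us and
 * enable the per-byte transfer mode above. A purely illustrative node
 * (values are examples, not taken from a real board) might look like:
 *
 *	rmi4-spi@0 {
 *		compatible = "syna,rmi4-spi";
 *		reg = <0>;
 *		spi-max-frequency = <4000000>;
 *		interrupt-parent = <&gpio1>;
 *		interrupts = <4 2>;
 *		spi-rx-delay-us = <30>;
 *		spi-tx-delay-us = <30>;
 *	};
 */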
static void rmi_spi_unregister_transport(void *data)
{
	struct rmi_spi_xport *rmi_spi = data;

	rmi_unregister_transport_device(&rmi_spi->xport);
}

static int rmi_spi_probe(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi;
	struct rmi_device_platform_data *pdata;
	struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
	int error;

	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
			GFP_KERNEL);
	if (!rmi_spi)
		return -ENOMEM;

	pdata = &rmi_spi->xport.pdata;

	if (spi->dev.of_node) {
		error = rmi_spi_of_probe(spi, pdata);
		if (error)
			return error;
	} else if (spi_pdata) {
		*pdata = *spi_pdata;
	}

	if (pdata->spi_data.bits_per_word)
		spi->bits_per_word = pdata->spi_data.bits_per_word;

	if (pdata->spi_data.mode)
		spi->mode = pdata->spi_data.mode;

	error = spi_setup(spi);
	if (error < 0) {
		dev_err(&spi->dev, "spi_setup failed!\n");
		return error;
	}

	pdata->irq = spi->irq;

	rmi_spi->spi = spi;
	mutex_init(&rmi_spi->page_mutex);

	rmi_spi->xport.dev = &spi->dev;
	rmi_spi->xport.proto_name = "spi";
	rmi_spi->xport.ops = &rmi_spi_ops;

	spi_set_drvdata(spi, rmi_spi);

	error = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
	if (error)
		return error;

	/*
	 * Setting the page to zero will (a) make sure the PSR is in a
	 * known state, and (b) make sure we can talk to the device.
	 */
	error = rmi_set_page(rmi_spi, 0);
	if (error) {
		dev_err(&spi->dev, "Failed to set page select to 0.\n");
		return error;
	}

	dev_info(&spi->dev, "registering SPI-connected sensor\n");

	error = rmi_register_transport_device(&rmi_spi->xport);
	if (error) {
		dev_err(&spi->dev, "failed to register sensor: %d\n", error);
		return error;
	}

	error = devm_add_action_or_reset(&spi->dev,
					 rmi_spi_unregister_transport,
					 rmi_spi);
	if (error)
		return error;

	return 0;
}
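
/*
 * Everything allocated in probe is device-managed and the transport device
 * is unregistered via the devm action registered above, so teardown runs
 * automatically in reverse order of probe; this is why the spi_driver below
 * has no .remove callback.
 */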
#ifdef CONFIG_PM_SLEEP
static int rmi_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return ret;
}

static int rmi_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return ret;
}
#endif
#ifdef CONFIG_PM
static int rmi_spi_runtime_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return 0;
}

static int rmi_spi_runtime_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return 0;
}
#endif
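
/*
 * System sleep and runtime PM share the same driver-level helpers; the
 * boolean passed to rmi_driver_suspend()/rmi_driver_resume() distinguishes
 * the two cases (true for system sleep, false for runtime PM). The runtime
 * hooks only warn on failure and still return 0.
 */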
static const struct dev_pm_ops rmi_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
	SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
			   NULL)
};

static const struct spi_device_id rmi_id[] = {
	{ "rmi4_spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, rmi_id);

static struct spi_driver rmi_spi_driver = {
	.driver = {
		.name	= "rmi4_spi",
		.pm	= &rmi_spi_pm,
		.of_match_table = of_match_ptr(rmi_spi_of_match),
	},
	.id_table	= rmi_id,
	.probe		= rmi_spi_probe,
};

module_spi_driver(rmi_spi_driver);

MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");