/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This driver provides the core support for a single RMI4-based device.
 *
 * The RMI4 specification can be found here (URL split for line length):
 *
 * http://www.synaptics.com/sites/default/files/
 * 511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <uapi/linux/input.h>
#include <linux/rmi.h>
#include "rmi_bus.h"
#include "rmi_driver.h"
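
/*
 * RMI4 exposes a 16-bit register address space divided into 256-byte pages;
 * the constants below describe that layout, along with the F01 reset command
 * and the default delay to wait after issuing it.
 */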
#define HAS_NONSTANDARD_PDT_MASK 0x40
#define RMI4_MAX_PAGE 0xff
#define RMI4_PAGE_SIZE 0x100
#define RMI4_PAGE_MASK 0xFF00

#define RMI_DEVICE_RESET_CMD 0x01
#define DEFAULT_RESET_DELAY_MS 100

void rmi_free_function_list(struct rmi_device *rmi_dev)
{
	struct rmi_function *fn, *tmp;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);

	rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");

	/* Doing it in the reverse order so F01 will be removed last */
	list_for_each_entry_safe_reverse(fn, tmp,
					 &data->function_list, node) {
		list_del(&fn->node);
		rmi_unregister_function(fn);
	}

	devm_kfree(&rmi_dev->dev, data->irq_memory);
	data->irq_memory = NULL;
	data->irq_status = NULL;
	data->fn_irq_bits = NULL;
	data->current_irq_mask = NULL;
	data->new_irq_mask = NULL;

	data->f01_container = NULL;
	data->f34_container = NULL;
}

static int reset_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->reset) {
		retval = fh->reset(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Reset failed with code %d.\n",
				retval);
	}

	return retval;
}

static int configure_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->config) {
		retval = fh->config(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Config failed with code %d.\n",
				retval);
	}

	return retval;
}

static int rmi_driver_process_reset_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = reset_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = configure_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static void process_one_interrupt(struct rmi_driver_data *data,
				  struct rmi_function *fn)
{
	struct rmi_function_handler *fh;

	if (!fn || !fn->dev.driver)
		return;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->attention) {
		bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask,
			   data->irq_count);
		if (!bitmap_empty(data->fn_irq_bits, data->irq_count))
			fh->attention(fn, data->fn_irq_bits);
	}
}

static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;
	struct rmi_function *entry;
	int error;

	if (!data)
		return 0;

	if (!data->attn_data.data) {
		error = rmi_read_block(rmi_dev,
				data->f01_container->fd.data_base_addr + 1,
				data->irq_status, data->num_of_irq_regs);
		if (error < 0) {
			dev_err(dev, "Failed to read irqs, code=%d\n", error);
			return error;
		}
	}

	mutex_lock(&data->irq_mutex);
	bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
		   data->irq_count);
	/*
	 * At this point, irq_status has all bits that are set in the
	 * interrupt status register and are enabled.
	 */
	mutex_unlock(&data->irq_mutex);

	/*
	 * It would be nice to be able to use irq_chip to handle these
	 * nested IRQs. Unfortunately, most of the current customers for
	 * this driver are using older kernels (3.0.x) that don't support
	 * the features required for that. Once they've shifted to more
	 * recent kernels (say, 3.3 and higher), this should be switched to
	 * use irq_chip.
	 */
	list_for_each_entry(entry, &data->function_list, node)
		process_one_interrupt(data, entry);

	if (data->input)
		input_sync(data->input);

	return 0;
}
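
/*
 * Transport drivers that receive attention data out of band (for example
 * over HID or SMBus) can queue it here; rmi_irq_fn() then consumes it from
 * the FIFO instead of reading the interrupt status registers itself.
 */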
void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
		       void *data, size_t size)
{
	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data;
	void *fifo_data;

	if (!drvdata->enabled)
		return;

	fifo_data = kmemdup(data, size, GFP_ATOMIC);
	if (!fifo_data)
		return;

	attn_data.irq_status = irq_status;
	attn_data.size = size;
	attn_data.data = fifo_data;

	kfifo_put(&drvdata->attn_fifo, attn_data);
}
EXPORT_SYMBOL_GPL(rmi_set_attn_data);
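
/*
 * Threaded interrupt handler: prefer any attention data already queued in
 * the FIFO; otherwise rmi_process_interrupt_requests() reads the interrupt
 * status registers directly. Recurse until the FIFO has been drained.
 */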
static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
{
	struct rmi_device *rmi_dev = dev_id;
	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data = {0};
	int ret, count;

	count = kfifo_get(&drvdata->attn_fifo, &attn_data);
	if (count) {
		*(drvdata->irq_status) = attn_data.irq_status;
		drvdata->attn_data = attn_data;
	}

	ret = rmi_process_interrupt_requests(rmi_dev);
	if (ret)
		rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
			"Failed to process interrupt request: %d\n", ret);

	if (count) {
		kfree(attn_data.data);
		attn_data.data = NULL;
	}

	if (!kfifo_is_empty(&drvdata->attn_fifo))
		return rmi_irq_fn(irq, dev_id);

	return IRQ_HANDLED;
}

static int rmi_irq_init(struct rmi_device *rmi_dev)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int irq_flags = irq_get_trigger_type(pdata->irq);
	int ret;

	if (!irq_flags)
		irq_flags = IRQF_TRIGGER_LOW;

	ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL,
					rmi_irq_fn, irq_flags | IRQF_ONESHOT,
					dev_driver_string(rmi_dev->xport->dev),
					rmi_dev);
	if (ret < 0) {
		dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n",
			pdata->irq);
		return ret;
	}

	data->enabled = true;

	return 0;
}

struct rmi_function *rmi_find_function(struct rmi_device *rmi_dev, u8 number)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;

	list_for_each_entry(entry, &data->function_list, node) {
		if (entry->fd.function_number == number)
			return entry;
	}

	return NULL;
}

static int suspend_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->suspend) {
		retval = fh->suspend(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Suspend failed with code %d.\n",
				retval);
	}

	return retval;
}

static int rmi_suspend_functions(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = suspend_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int resume_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->resume) {
		retval = fh->resume(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Resume failed with code %d.\n",
				retval);
	}

	return retval;
}

static int rmi_resume_functions(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = resume_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

int rmi_enable_sensor(struct rmi_device *rmi_dev)
{
	int retval = 0;

	retval = rmi_driver_process_config_requests(rmi_dev);
	if (retval < 0)
		return retval;

	return rmi_process_interrupt_requests(rmi_dev);
}

/**
 * rmi_driver_set_input_params - set input device id and other data.
 *
 * @rmi_dev: Pointer to an RMI device
 * @input: Pointer to input device
 *
 */
static int rmi_driver_set_input_params(struct rmi_device *rmi_dev,
				       struct input_dev *input)
{
	input->name = SYNAPTICS_INPUT_DEVICE_NAME;
	input->id.vendor = SYNAPTICS_VENDOR_ID;
	input->id.bustype = BUS_RMI;
	return 0;
}

static void rmi_driver_set_input_name(struct rmi_device *rmi_dev,
				      struct input_dev *input)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	const char *device_name = rmi_f01_get_product_ID(data->f01_container);
	char *name;

	name = devm_kasprintf(&rmi_dev->dev, GFP_KERNEL,
			      "Synaptics %s", device_name);
	if (!name)
		return;

	input->name = name;
}

static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
				   unsigned long *mask)
{
	int error = 0;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;

	mutex_lock(&data->irq_mutex);
	bitmap_or(data->new_irq_mask,
		  data->current_irq_mask, mask, data->irq_count);

	error = rmi_write_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->new_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(dev, "%s: Failed to change enabled interrupts!",
			__func__);
		goto error_unlock;
	}
	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
		    data->num_of_irq_regs);

error_unlock:
	mutex_unlock(&data->irq_mutex);
	return error;
}

static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
				     unsigned long *mask)
{
	int error = 0;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;

	mutex_lock(&data->irq_mutex);
	bitmap_andnot(data->new_irq_mask,
		      data->current_irq_mask, mask, data->irq_count);

	error = rmi_write_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->new_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(dev, "%s: Failed to change enabled interrupts!",
			__func__);
		goto error_unlock;
	}
	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
		    data->num_of_irq_regs);

error_unlock:
	mutex_unlock(&data->irq_mutex);
	return error;
}
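
/*
 * Called after the device has been reset: re-read the interrupt enable mask
 * from F01 and give every function a chance to reset and reconfigure itself.
 */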
static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int error;

	/*
	 * Can get called before the driver is fully ready to deal with
	 * this situation.
	 */
	if (!data || !data->f01_container) {
		dev_warn(&rmi_dev->dev,
			 "Not ready to handle reset yet!\n");
		return 0;
	}

	error = rmi_read_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->current_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(&rmi_dev->dev, "%s: Failed to read current IRQ mask.\n",
			__func__);
		return error;
	}

	error = rmi_driver_process_reset_requests(rmi_dev);
	if (error < 0)
		return error;

	error = rmi_driver_process_config_requests(rmi_dev);
	if (error < 0)
		return error;

	return 0;
}

static int rmi_read_pdt_entry(struct rmi_device *rmi_dev,
			      struct pdt_entry *entry, u16 pdt_address)
{
	u8 buf[RMI_PDT_ENTRY_SIZE];
	int error;

	error = rmi_read_block(rmi_dev, pdt_address, buf, RMI_PDT_ENTRY_SIZE);
	if (error) {
		dev_err(&rmi_dev->dev, "Read PDT entry at %#06x failed, code: %d.\n",
			pdt_address, error);
		return error;
	}

	entry->page_start = pdt_address & RMI4_PAGE_MASK;
	entry->query_base_addr = buf[0];
	entry->command_base_addr = buf[1];
	entry->control_base_addr = buf[2];
	entry->data_base_addr = buf[3];
	entry->interrupt_source_count = buf[4] & RMI_PDT_INT_SOURCE_COUNT_MASK;
	entry->function_version = (buf[4] & RMI_PDT_FUNCTION_VERSION_MASK) >> 5;
	entry->function_number = buf[5];

	return 0;
}

static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
				      struct rmi_function_descriptor *fd)
{
	fd->query_base_addr = pdt->query_base_addr + pdt->page_start;
	fd->command_base_addr = pdt->command_base_addr + pdt->page_start;
	fd->control_base_addr = pdt->control_base_addr + pdt->page_start;
	fd->data_base_addr = pdt->data_base_addr + pdt->page_start;
	fd->function_number = pdt->function_number;
	fd->interrupt_source_count = pdt->interrupt_source_count;
	fd->function_version = pdt->function_version;
}
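
/*
 * Return codes for the PDT scan callbacks: a callback returns
 * RMI_SCAN_CONTINUE to keep walking the Page Description Table,
 * RMI_SCAN_DONE to stop cleanly, or a negative errno on failure.
 */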
#define RMI_SCAN_CONTINUE 0
#define RMI_SCAN_DONE 1

static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
			     int page,
			     int *empty_pages,
			     void *ctx,
			     int (*callback)(struct rmi_device *rmi_dev,
					     void *ctx,
					     const struct pdt_entry *entry))
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct pdt_entry pdt_entry;
	u16 page_start = RMI4_PAGE_SIZE * page;
	u16 pdt_start = page_start + PDT_START_SCAN_LOCATION;
	u16 pdt_end = page_start + PDT_END_SCAN_LOCATION;
	u16 addr;
	int error;
	int retval;

	for (addr = pdt_start; addr >= pdt_end; addr -= RMI_PDT_ENTRY_SIZE) {
		error = rmi_read_pdt_entry(rmi_dev, &pdt_entry, addr);
		if (error)
			return error;

		if (RMI4_END_OF_PDT(pdt_entry.function_number))
			break;

		retval = callback(rmi_dev, ctx, &pdt_entry);
		if (retval != RMI_SCAN_CONTINUE)
			return retval;
	}

	/*
	 * Count number of empty PDT pages. If a gap of two pages
	 * or more is found, stop scanning.
	 */
	if (addr == pdt_start)
		++*empty_pages;
	else
		*empty_pages = 0;

	return (data->bootloader_mode || *empty_pages >= 2) ?
					RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
}

int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
		 int (*callback)(struct rmi_device *rmi_dev,
				 void *ctx, const struct pdt_entry *entry))
{
	int page;
	int empty_pages = 0;
	int retval = RMI_SCAN_DONE;

	for (page = 0; page <= RMI4_MAX_PAGE; page++) {
		retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages,
					   ctx, callback);
		if (retval != RMI_SCAN_CONTINUE)
			break;
	}

	return retval < 0 ? retval : 0;
}
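
/*
 * Parse a register descriptor: a one-byte presence-register size, then the
 * presence register itself (the register structure size plus a bitmap of
 * which packet registers exist), then the register structure describing the
 * size and subpackets of each present packet register.
 */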
int rmi_read_register_desc(struct rmi_device *d, u16 addr,
			   struct rmi_register_descriptor *rdesc)
{
	int ret;
	u8 size_presence_reg;
	u8 buf[35];
	int presense_offset = 1;
	u8 *struct_buf;
	int reg;
	int offset = 0;
	int map_offset = 0;
	int i;
	int b;

	/*
	 * The first register of the register descriptor is the size of
	 * the register descriptor's presence register.
	 */
	ret = rmi_read(d, addr, &size_presence_reg);
	if (ret)
		return ret;
	++addr;

	if (size_presence_reg < 0 || size_presence_reg > 35)
		return -EIO;

	memset(buf, 0, sizeof(buf));

	/*
	 * The presence register contains the size of the register structure
	 * and a bitmap which identifies which packet registers are present
	 * for this particular register type (i.e. query, control, or data).
	 */
	ret = rmi_read_block(d, addr, buf, size_presence_reg);
	if (ret)
		return ret;
	++addr;

	if (buf[0] == 0) {
		presense_offset = 3;
		rdesc->struct_size = buf[1] | (buf[2] << 8);
	} else {
		rdesc->struct_size = buf[0];
	}

	for (i = presense_offset; i < size_presence_reg; i++) {
		for (b = 0; b < 8; b++) {
			if (buf[i] & (0x1 << b))
				bitmap_set(rdesc->presense_map, map_offset, 1);
			++map_offset;
		}
	}

	rdesc->num_registers = bitmap_weight(rdesc->presense_map,
						RMI_REG_DESC_PRESENSE_BITS);
	rdesc->registers = devm_kzalloc(&d->dev, rdesc->num_registers *
				sizeof(struct rmi_register_desc_item),
				GFP_KERNEL);
	if (!rdesc->registers)
		return -ENOMEM;

	/*
	 * Allocate a temporary buffer to hold the register structure.
	 * devm_kzalloc is not used here since the buffer does not need to
	 * be retained after this function returns.
	 */
	struct_buf = kzalloc(rdesc->struct_size, GFP_KERNEL);
	if (!struct_buf)
		return -ENOMEM;

	/*
	 * The register structure contains information about every packet
	 * register of this type. This includes the size of the packet
	 * register and a bitmap of all subpackets contained in the packet
	 * register.
	 */
	ret = rmi_read_block(d, addr, struct_buf, rdesc->struct_size);
	if (ret)
		goto free_struct_buff;

	reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
	for (i = 0; i < rdesc->num_registers; i++) {
		struct rmi_register_desc_item *item = &rdesc->registers[i];
		int reg_size = struct_buf[offset];

		++offset;
		if (reg_size == 0) {
			reg_size = struct_buf[offset] |
					(struct_buf[offset + 1] << 8);
			offset += 2;
		}

		if (reg_size == 0) {
			reg_size = struct_buf[offset] |
				(struct_buf[offset + 1] << 8) |
				(struct_buf[offset + 2] << 16) |
				(struct_buf[offset + 3] << 24);
			offset += 4;
		}

		item->reg = reg;
		item->reg_size = reg_size;

		map_offset = 0;

		do {
			for (b = 0; b < 7; b++) {
				if (struct_buf[offset] & (0x1 << b))
					bitmap_set(item->subpacket_map,
						map_offset, 1);
				++map_offset;
			}
		} while (struct_buf[offset++] & 0x80);

		item->num_subpackets = bitmap_weight(item->subpacket_map,
						RMI_REG_DESC_SUBPACKET_BITS);

		rmi_dbg(RMI_DEBUG_CORE, &d->dev,
			"%s: reg: %d reg size: %ld subpackets: %d\n", __func__,
			item->reg, item->reg_size, item->num_subpackets);

		reg = find_next_bit(rdesc->presense_map,
				RMI_REG_DESC_PRESENSE_BITS, reg + 1);
	}

free_struct_buff:
	kfree(struct_buf);
	return ret;
}

const struct rmi_register_desc_item *rmi_get_register_desc_item(
				struct rmi_register_descriptor *rdesc, u16 reg)
{
	const struct rmi_register_desc_item *item;
	int i;

	for (i = 0; i < rdesc->num_registers; i++) {
		item = &rdesc->registers[i];
		if (item->reg == reg)
			return item;
	}

	return NULL;
}

size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
{
	const struct rmi_register_desc_item *item;
	int i;
	size_t size = 0;

	for (i = 0; i < rdesc->num_registers; i++) {
		item = &rdesc->registers[i];
		size += item->reg_size;
	}
	return size;
}

/* Compute the register offset relative to the base address */
int rmi_register_desc_calc_reg_offset(
		struct rmi_register_descriptor *rdesc, u16 reg)
{
	const struct rmi_register_desc_item *item;
	int offset = 0;
	int i;

	for (i = 0; i < rdesc->num_registers; i++) {
		item = &rdesc->registers[i];
		if (item->reg == reg)
			return offset;
		++offset;
	}
	return -1;
}

bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
				     u8 subpacket)
{
	return find_next_bit(item->subpacket_map, RMI_REG_DESC_PRESENSE_BITS,
			     subpacket) == subpacket;
}
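
/*
 * Determine whether the device is stuck in its bootloader by inspecting the
 * F34 (firmware flash) or F01 (device control) status registers while
 * scanning the PDT.
 */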
static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
				     const struct pdt_entry *pdt)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int ret;
	u8 status;

	if (pdt->function_number == 0x34 && pdt->function_version > 1) {
		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
		if (ret) {
			dev_err(&rmi_dev->dev,
				"Failed to read F34 status: %d.\n", ret);
			return ret;
		}

		if (status & BIT(7))
			data->bootloader_mode = true;
	} else if (pdt->function_number == 0x01) {
		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
		if (ret) {
			dev_err(&rmi_dev->dev,
				"Failed to read F01 status: %d.\n", ret);
			return ret;
		}

		if (status & BIT(6))
			data->bootloader_mode = true;
	}

	return 0;
}

static int rmi_count_irqs(struct rmi_device *rmi_dev,
			  void *ctx, const struct pdt_entry *pdt)
{
	int *irq_count = ctx;
	int ret;

	*irq_count += pdt->interrupt_source_count;

	ret = rmi_check_bootloader_mode(rmi_dev, pdt);
	if (ret < 0)
		return ret;

	return RMI_SCAN_CONTINUE;
}

int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
		      const struct pdt_entry *pdt)
{
	int error;

	if (pdt->function_number == 0x01) {
		u16 cmd_addr = pdt->page_start + pdt->command_base_addr;
		u8 cmd_buf = RMI_DEVICE_RESET_CMD;
		const struct rmi_device_platform_data *pdata =
				rmi_get_platform_data(rmi_dev);

		if (rmi_dev->xport->ops->reset) {
			error = rmi_dev->xport->ops->reset(rmi_dev->xport,
						cmd_addr);
			if (error)
				return error;

			return RMI_SCAN_DONE;
		}

		rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n");
		error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
		if (error) {
			dev_err(&rmi_dev->dev,
				"Initial reset failed. Code = %d.\n", error);
			return error;
		}

		mdelay(pdata->reset_delay_ms ?: DEFAULT_RESET_DELAY_MS);

		return RMI_SCAN_DONE;
	}

	/* F01 should always be on page 0. If we don't find it there, fail. */
	return pdt->page_start == 0 ? RMI_SCAN_CONTINUE : -ENODEV;
}
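
/*
 * PDT scan callback that allocates and registers an rmi_function for each
 * PDT entry, building its per-function IRQ mask from the running IRQ count
 * passed in through ctx.
 */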
static int rmi_create_function(struct rmi_device *rmi_dev,
			       void *ctx, const struct pdt_entry *pdt)
{
	struct device *dev = &rmi_dev->dev;
	struct rmi_driver_data *data = dev_get_drvdata(dev);
	int *current_irq_count = ctx;
	struct rmi_function *fn;
	int i;
	int error;

	rmi_dbg(RMI_DEBUG_CORE, dev, "Initializing F%02X.\n",
		pdt->function_number);

	fn = kzalloc(sizeof(struct rmi_function) +
			BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long),
		     GFP_KERNEL);
	if (!fn) {
		dev_err(dev, "Failed to allocate memory for F%02X\n",
			pdt->function_number);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&fn->node);
	rmi_driver_copy_pdt_to_fd(pdt, &fn->fd);

	fn->rmi_dev = rmi_dev;

	fn->num_of_irqs = pdt->interrupt_source_count;
	fn->irq_pos = *current_irq_count;
	*current_irq_count += fn->num_of_irqs;

	for (i = 0; i < fn->num_of_irqs; i++)
		set_bit(fn->irq_pos + i, fn->irq_mask);

	error = rmi_register_function(fn);
	if (error)
		goto err_put_fn;

	if (pdt->function_number == 0x01)
		data->f01_container = fn;
	else if (pdt->function_number == 0x34)
		data->f34_container = fn;

	list_add_tail(&fn->node, &data->function_list);

	return RMI_SCAN_CONTINUE;

err_put_fn:
	put_device(&fn->dev);
	return error;
}
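
/*
 * rmi_enable_irq()/rmi_disable_irq() are used by the suspend/resume and
 * remove paths: they toggle the ATTN interrupt, optionally configure it as
 * a wakeup source, and keep the enabled flag consistent under enabled_mutex.
 */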
void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int irq = pdata->irq;
	int irq_flags;
	int retval;

	mutex_lock(&data->enabled_mutex);

	if (data->enabled)
		goto out;

	enable_irq(irq);
	data->enabled = true;
	if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
		retval = disable_irq_wake(irq);
		if (retval)
			dev_warn(&rmi_dev->dev,
				 "Failed to disable irq for wake: %d\n",
				 retval);
	}

	/*
	 * Call rmi_process_interrupt_requests() after enabling irq,
	 * otherwise we may lose interrupt on edge-triggered systems.
	 */
	irq_flags = irq_get_trigger_type(pdata->irq);
	if (irq_flags & IRQ_TYPE_EDGE_BOTH)
		rmi_process_interrupt_requests(rmi_dev);

out:
	mutex_unlock(&data->enabled_mutex);
}

void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data = {0};
	int irq = pdata->irq;
	int retval, count;

	mutex_lock(&data->enabled_mutex);

	if (!data->enabled)
		goto out;

	data->enabled = false;
	disable_irq(irq);
	if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
		retval = enable_irq_wake(irq);
		if (retval)
			dev_warn(&rmi_dev->dev,
				 "Failed to enable irq for wake: %d\n",
				 retval);
	}

	/* make sure the fifo is clean */
	while (!kfifo_is_empty(&data->attn_fifo)) {
		count = kfifo_get(&data->attn_fifo, &attn_data);
		if (count)
			kfree(attn_data.data);
	}

out:
	mutex_unlock(&data->enabled_mutex);
}

int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake)
{
	int retval;

	retval = rmi_suspend_functions(rmi_dev);
	if (retval)
		dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
			 retval);

	rmi_disable_irq(rmi_dev, enable_wake);
	return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_suspend);

int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake)
{
	int retval;

	rmi_enable_irq(rmi_dev, clear_wake);

	retval = rmi_resume_functions(rmi_dev);
	if (retval)
		dev_warn(&rmi_dev->dev, "Failed to resume functions: %d\n",
			 retval);

	return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_resume);

static int rmi_driver_remove(struct device *dev)
{
	struct rmi_device *rmi_dev = to_rmi_device(dev);

	rmi_disable_irq(rmi_dev, false);

	rmi_f34_remove_sysfs(rmi_dev);
	rmi_free_function_list(rmi_dev);

	return 0;
}

#ifdef CONFIG_OF
static int rmi_driver_of_probe(struct device *dev,
			       struct rmi_device_platform_data *pdata)
{
	int retval;

	retval = rmi_of_property_read_u32(dev, &pdata->reset_delay_ms,
					  "syna,reset-delay-ms", 1);
	if (retval)
		return retval;

	return 0;
}
#else
static inline int rmi_driver_of_probe(struct device *dev,
				      struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif
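
/*
 * irq_memory holds four equal-sized bitmaps carved out of one allocation:
 * the latest interrupt status, the per-function scratch bits, and the
 * current and pending interrupt enable masks.
 */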
int rmi_probe_interrupts(struct rmi_driver_data *data)
{
	struct rmi_device *rmi_dev = data->rmi_dev;
	struct device *dev = &rmi_dev->dev;
	int irq_count;
	size_t size;
	int retval;

	/*
	 * We need to count the IRQs and allocate their storage before scanning
	 * the PDT and creating the function entries, because adding a new
	 * function can trigger events that result in the IRQ related storage
	 * being accessed.
	 */
	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
	irq_count = 0;
	data->bootloader_mode = false;

	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
	if (retval < 0) {
		dev_err(dev, "IRQ counting failed with code %d.\n", retval);
		return retval;
	}

	if (data->bootloader_mode)
		dev_warn(dev, "Device in bootloader mode.\n");

	data->irq_count = irq_count;
	data->num_of_irq_regs = (data->irq_count + 7) / 8;

	size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
	data->irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL);
	if (!data->irq_memory) {
		dev_err(dev, "Failed to allocate memory for irq masks.\n");
		return -ENOMEM;
	}

	data->irq_status = data->irq_memory + size * 0;
	data->fn_irq_bits = data->irq_memory + size * 1;
	data->current_irq_mask = data->irq_memory + size * 2;
	data->new_irq_mask = data->irq_memory + size * 3;

	return retval;
}

int rmi_init_functions(struct rmi_driver_data *data)
{
	struct rmi_device *rmi_dev = data->rmi_dev;
	struct device *dev = &rmi_dev->dev;
	int irq_count;
	int retval;

	irq_count = 0;
	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
	if (retval < 0) {
		dev_err(dev, "Function creation failed with code %d.\n",
			retval);
		goto err_destroy_functions;
	}

	if (!data->f01_container) {
		dev_err(dev, "Missing F01 container!\n");
		retval = -EINVAL;
		goto err_destroy_functions;
	}

	retval = rmi_read_block(rmi_dev,
				data->f01_container->fd.control_base_addr + 1,
				data->current_irq_mask, data->num_of_irq_regs);
	if (retval < 0) {
		dev_err(dev, "%s: Failed to read current IRQ mask.\n",
			__func__);
		goto err_destroy_functions;
	}

	return 0;

err_destroy_functions:
	rmi_free_function_list(rmi_dev);
	return retval;
}

static int rmi_driver_probe(struct device *dev)
{
	struct rmi_driver *rmi_driver;
	struct rmi_driver_data *data;
	struct rmi_device_platform_data *pdata;
	struct rmi_device *rmi_dev;
	int retval;

	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
		__func__);

	if (!rmi_is_physical_device(dev)) {
		rmi_dbg(RMI_DEBUG_CORE, dev, "Not a physical device.\n");
		return -ENODEV;
	}

	rmi_dev = to_rmi_device(dev);
	rmi_driver = to_rmi_driver(dev->driver);
	rmi_dev->driver = rmi_driver;

	pdata = rmi_get_platform_data(rmi_dev);

	if (rmi_dev->xport->dev->of_node) {
		retval = rmi_driver_of_probe(rmi_dev->xport->dev, pdata);
		if (retval)
			return retval;
	}

	data = devm_kzalloc(dev, sizeof(struct rmi_driver_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->function_list);
	data->rmi_dev = rmi_dev;
	dev_set_drvdata(&rmi_dev->dev, data);

	/*
	 * Right before a warm boot, the sensor might be in some unusual state,
	 * such as F54 diagnostics, or F34 bootloader mode after a firmware
	 * or configuration update. In order to clear the sensor to a known
	 * state and/or apply any updates, we issue an initial reset to clear
	 * any previous settings and force it into normal operation.
	 *
	 * We have to do this before actually building the PDT because
	 * the reflash updates (if any) might cause various registers to move
	 * around.
	 *
	 * For a number of reasons, this initial reset may fail to return
	 * within the specified time, but we'll still be able to bring up the
	 * driver normally after that failure. This occurs most commonly in
	 * a cold boot situation (where the firmware takes longer to come up
	 * than from a warm boot) and the reset_delay_ms in the platform data
	 * has been set too short to accommodate that. Since the sensor will
	 * eventually come up and be usable, we don't want to just fail here
	 * and leave the customer's device unusable. So we warn them, and
	 * continue processing.
	 */
	retval = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
	if (retval < 0)
		dev_warn(dev, "RMI initial reset failed! Continuing in spite of this.\n");

	retval = rmi_read(rmi_dev, PDT_PROPERTIES_LOCATION, &data->pdt_props);
	if (retval < 0) {
		/*
		 * we'll print out a warning and continue since
		 * failure to get the PDT properties is not a cause to fail
		 */
		dev_warn(dev, "Could not read PDT properties from %#06x (code %d). Assuming 0x00.\n",
			 PDT_PROPERTIES_LOCATION, retval);
	}

	mutex_init(&data->irq_mutex);
	mutex_init(&data->enabled_mutex);

	retval = rmi_probe_interrupts(data);
	if (retval)
		goto err;

	if (rmi_dev->xport->input) {
		/*
		 * The transport driver already has an input device.
		 * In some cases it is preferable to reuse the transport
		 * device's input device instead of creating a new one here.
		 * One example is HID touchpads whose "pass-through" button
		 * events are not reported through the RMI registers.
		 */
		data->input = rmi_dev->xport->input;
	} else {
		data->input = devm_input_allocate_device(dev);
		if (!data->input) {
			dev_err(dev, "%s: Failed to allocate input device.\n",
				__func__);
			retval = -ENOMEM;
			goto err;
		}
		rmi_driver_set_input_params(rmi_dev, data->input);
		data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
						   "%s/input0", dev_name(dev));
	}

	retval = rmi_init_functions(data);
	if (retval)
		goto err;

	retval = rmi_f34_create_sysfs(rmi_dev);
	if (retval)
		goto err;

	if (data->input) {
		rmi_driver_set_input_name(rmi_dev, data->input);
		if (!rmi_dev->xport->input) {
			if (input_register_device(data->input)) {
				dev_err(dev, "%s: Failed to register input device.\n",
					__func__);
				goto err_destroy_functions;
			}
		}
	}

	retval = rmi_irq_init(rmi_dev);
	if (retval < 0)
		goto err_destroy_functions;

	if (data->f01_container->dev.driver) {
		/* Driver already bound, so enable ATTN now. */
		retval = rmi_enable_sensor(rmi_dev);
		if (retval)
			goto err_disable_irq;
	}

	return 0;

err_disable_irq:
	rmi_disable_irq(rmi_dev, false);
err_destroy_functions:
	rmi_free_function_list(rmi_dev);
err:
	return retval;
}

static struct rmi_driver rmi_physical_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "rmi4_physical",
		.bus = &rmi_bus_type,
		.probe = rmi_driver_probe,
		.remove = rmi_driver_remove,
	},
	.reset_handler = rmi_driver_reset_handler,
	.clear_irq_bits = rmi_driver_clear_irq_bits,
	.set_irq_bits = rmi_driver_set_irq_bits,
	.set_input_params = rmi_driver_set_input_params,
};

bool rmi_is_physical_driver(struct device_driver *drv)
{
	return drv == &rmi_physical_driver.driver;
}

int __init rmi_register_physical_driver(void)
{
	int error;

	error = driver_register(&rmi_physical_driver.driver);
	if (error) {
		pr_err("%s: driver register failed, code=%d.\n", __func__,
		       error);
		return error;
	}

	return 0;
}

void __exit rmi_unregister_physical_driver(void)
{
	driver_unregister(&rmi_physical_driver.driver);
}