/* drivers/base/regmap/regmap-irq.c */
/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"
/*
 * Runtime state for one regmap-based interrupt controller instance,
 * allocated by regmap_add_irq_chip() and attached to every virtual IRQ
 * as chip data.
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* guards the cached buffers below */
	struct irq_chip irq_chip;	/* per-instance copy so .name can differ */

	struct regmap *map;
	const struct regmap_irq_chip *chip;	/* static chip description */

	int irq_base;			/* first allocated descriptor (legacy mode) */
	struct irq_domain *domain;

	int irq;			/* primary (parent) interrupt */
	int wake_count;			/* pending wake refs to push to the parent */

	void *status_reg_buf;		/* raw scratch buffer for bulk status reads */
	unsigned int *status_buf;	/* latest status, one word per register */
	unsigned int *mask_buf;		/* cached mask state (set bit = masked) */
	unsigned int *mask_buf_def;	/* union of all IRQ bits per register */
	unsigned int *wake_buf;		/* cached wake state (only if wake_base) */
	unsigned int *type_buf;		/* cached edge/level type bits */
	unsigned int *type_buf_def;	/* all configurable type bits per register */

	unsigned int irq_reg_stride;
	unsigned int type_reg_stride;
};
  40. static inline const
  41. struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
  42. int irq)
  43. {
  44. return &data->chip->irqs[irq];
  45. }
  46. static void regmap_irq_lock(struct irq_data *data)
  47. {
  48. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  49. mutex_lock(&d->lock);
  50. }
/*
 * irq_bus_sync_unlock callback: flush all cached mask/wake/type state
 * to the hardware, ack masked interrupts where required, propagate any
 * wake count delta to the parent interrupt, then release the bus lock
 * taken in regmap_irq_lock().
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;

	/* Keep the device resumed while we touch its registers. */
	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware. We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			/* Inverted sense: a set hardware bit enables the IRQ. */
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			/* unmask register lives at a fixed offset from mask */
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Sync any cached edge/level type configuration. */
	for (i = 0; i < d->chip->num_type_reg; i++) {
		if (!d->type_buf_def[i])
			continue;
		reg = d->chip->type_base +
			(i * map->reg_stride * d->type_reg_stride);
		if (d->chip->type_invert)
			ret = regmap_update_bits(d->map, reg,
				d->type_buf_def[i], ~d->type_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
				d->type_buf_def[i], d->type_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync type in %x\n",
				reg);
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
  160. static void regmap_irq_enable(struct irq_data *data)
  161. {
  162. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  163. struct regmap *map = d->map;
  164. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  165. d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
  166. }
  167. static void regmap_irq_disable(struct irq_data *data)
  168. {
  169. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  170. struct regmap *map = d->map;
  171. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  172. d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
  173. }
  174. static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
  175. {
  176. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  177. struct regmap *map = d->map;
  178. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  179. int reg = irq_data->type_reg_offset / map->reg_stride;
  180. if (!(irq_data->type_rising_mask | irq_data->type_falling_mask))
  181. return 0;
  182. d->type_buf[reg] &= ~(irq_data->type_falling_mask |
  183. irq_data->type_rising_mask);
  184. switch (type) {
  185. case IRQ_TYPE_EDGE_FALLING:
  186. d->type_buf[reg] |= irq_data->type_falling_mask;
  187. break;
  188. case IRQ_TYPE_EDGE_RISING:
  189. d->type_buf[reg] |= irq_data->type_rising_mask;
  190. break;
  191. case IRQ_TYPE_EDGE_BOTH:
  192. d->type_buf[reg] |= (irq_data->type_falling_mask |
  193. irq_data->type_rising_mask);
  194. break;
  195. default:
  196. return -EINVAL;
  197. }
  198. return 0;
  199. }
  200. static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
  201. {
  202. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  203. struct regmap *map = d->map;
  204. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  205. if (on) {
  206. if (d->wake_buf)
  207. d->wake_buf[irq_data->reg_offset / map->reg_stride]
  208. &= ~irq_data->mask;
  209. d->wake_count++;
  210. } else {
  211. if (d->wake_buf)
  212. d->wake_buf[irq_data->reg_offset / map->reg_stride]
  213. |= irq_data->mask;
  214. d->wake_count--;
  215. }
  216. return 0;
  217. }
/*
 * Template irq_chip copied into each regmap_irq_chip_data so that the
 * name can be set per chip. All callbacks operate on cached buffers
 * under the bus lock; hardware writes happen in irq_bus_sync_unlock.
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};
  226. static irqreturn_t regmap_irq_thread(int irq, void *d)
  227. {
  228. struct regmap_irq_chip_data *data = d;
  229. const struct regmap_irq_chip *chip = data->chip;
  230. struct regmap *map = data->map;
  231. int ret, i;
  232. bool handled = false;
  233. u32 reg;
  234. if (chip->handle_pre_irq)
  235. chip->handle_pre_irq(chip->irq_drv_data);
  236. if (chip->runtime_pm) {
  237. ret = pm_runtime_get_sync(map->dev);
  238. if (ret < 0) {
  239. dev_err(map->dev, "IRQ thread failed to resume: %d\n",
  240. ret);
  241. pm_runtime_put(map->dev);
  242. goto exit;
  243. }
  244. }
  245. /*
  246. * Read in the statuses, using a single bulk read if possible
  247. * in order to reduce the I/O overheads.
  248. */
  249. if (!map->use_single_read && map->reg_stride == 1 &&
  250. data->irq_reg_stride == 1) {
  251. u8 *buf8 = data->status_reg_buf;
  252. u16 *buf16 = data->status_reg_buf;
  253. u32 *buf32 = data->status_reg_buf;
  254. BUG_ON(!data->status_reg_buf);
  255. ret = regmap_bulk_read(map, chip->status_base,
  256. data->status_reg_buf,
  257. chip->num_regs);
  258. if (ret != 0) {
  259. dev_err(map->dev, "Failed to read IRQ status: %d\n",
  260. ret);
  261. goto exit;
  262. }
  263. for (i = 0; i < data->chip->num_regs; i++) {
  264. switch (map->format.val_bytes) {
  265. case 1:
  266. data->status_buf[i] = buf8[i];
  267. break;
  268. case 2:
  269. data->status_buf[i] = buf16[i];
  270. break;
  271. case 4:
  272. data->status_buf[i] = buf32[i];
  273. break;
  274. default:
  275. BUG();
  276. goto exit;
  277. }
  278. }
  279. } else {
  280. for (i = 0; i < data->chip->num_regs; i++) {
  281. ret = regmap_read(map, chip->status_base +
  282. (i * map->reg_stride
  283. * data->irq_reg_stride),
  284. &data->status_buf[i]);
  285. if (ret != 0) {
  286. dev_err(map->dev,
  287. "Failed to read IRQ status: %d\n",
  288. ret);
  289. if (chip->runtime_pm)
  290. pm_runtime_put(map->dev);
  291. goto exit;
  292. }
  293. }
  294. }
  295. /*
  296. * Ignore masked IRQs and ack if we need to; we ack early so
  297. * there is no race between handling and acknowleding the
  298. * interrupt. We assume that typically few of the interrupts
  299. * will fire simultaneously so don't worry about overhead from
  300. * doing a write per register.
  301. */
  302. for (i = 0; i < data->chip->num_regs; i++) {
  303. data->status_buf[i] &= ~data->mask_buf[i];
  304. if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
  305. reg = chip->ack_base +
  306. (i * map->reg_stride * data->irq_reg_stride);
  307. ret = regmap_write(map, reg, data->status_buf[i]);
  308. if (ret != 0)
  309. dev_err(map->dev, "Failed to ack 0x%x: %d\n",
  310. reg, ret);
  311. }
  312. }
  313. for (i = 0; i < chip->num_irqs; i++) {
  314. if (data->status_buf[chip->irqs[i].reg_offset /
  315. map->reg_stride] & chip->irqs[i].mask) {
  316. handle_nested_irq(irq_find_mapping(data->domain, i));
  317. handled = true;
  318. }
  319. }
  320. if (chip->runtime_pm)
  321. pm_runtime_put(map->dev);
  322. exit:
  323. if (chip->handle_post_irq)
  324. chip->handle_post_irq(chip->irq_drv_data);
  325. if (handled)
  326. return IRQ_HANDLED;
  327. else
  328. return IRQ_NONE;
  329. }
  330. static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
  331. irq_hw_number_t hw)
  332. {
  333. struct regmap_irq_chip_data *data = h->host_data;
  334. irq_set_chip_data(virq, data);
  335. irq_set_chip(virq, &data->irq_chip);
  336. irq_set_nested_thread(virq, 1);
  337. irq_set_parent(virq, data->irq);
  338. irq_set_noprobe(virq);
  339. return 0;
  340. }
/* Domain ops: map via regmap_irq_map, translate two-cell DT specifiers. */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};
/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache. The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	/* Every IRQ must be stride-aligned and fall within num_regs. */
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	/* Legacy mode: reserve a fixed range of IRQ descriptors. */
	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	if (chip->num_type_reg) {
		d->type_buf_def = kcalloc(chip->num_type_reg,
					  sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	/* Scratch buffer only needed when bulk status reads are possible. */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	/* Build the union of all IRQ bits for each register. */
	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			/* separate unmask register: clear all enables there */
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Reset all configurable type bits to their hardware default. */
	if (chip->num_type_reg) {
		for (i = 0; i < chip->num_irqs; i++) {
			reg = chip->irqs[i].type_reg_offset / map->reg_stride;
			d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask |
					chip->irqs[i].type_falling_mask;
		}
		for (i = 0; i < chip->num_type_reg; ++i) {
			if (!d->type_buf_def[i])
				continue;
			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (chip->type_invert)
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0xFF);
			else
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0x0);
			if (ret != 0) {
				dev_err(map->dev,
					"Failed to set type in 0x%x: %x\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	/* Stop the primary handler before tearing down the mappings. */
	free_irq(irq, d);

	/* Dispose all virtual irq from irq domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirq if holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual irq of hwirq on chip and if it is
		 * there then dispose it
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
  616. static void devm_regmap_irq_chip_release(struct device *dev, void *res)
  617. {
  618. struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
  619. regmap_del_irq_chip(d->irq, d);
  620. }
  621. static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
  622. {
  623. struct regmap_irq_chip_data **r = res;
  624. if (!r || !*r) {
  625. WARN_ON(!r || !*r);
  626. return 0;
  627. }
  628. return *r == data;
  629. }
  630. /**
  631. * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
  632. *
  633. * @dev: The device pointer on which irq_chip belongs to.
  634. * @map: The regmap for the device.
  635. * @irq: The IRQ the device uses to signal interrupts
  636. * @irq_flags: The IRQF_ flags to use for the primary interrupt.
  637. * @irq_base: Allocate at specific IRQ number if irq_base > 0.
  638. * @chip: Configuration for the interrupt controller.
  639. * @data: Runtime data structure for the controller, allocated on success
  640. *
  641. * Returns 0 on success or an errno on failure.
  642. *
  643. * The &regmap_irq_chip_data will be automatically released when the device is
  644. * unbound.
  645. */
  646. int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
  647. int irq_flags, int irq_base,
  648. const struct regmap_irq_chip *chip,
  649. struct regmap_irq_chip_data **data)
  650. {
  651. struct regmap_irq_chip_data **ptr, *d;
  652. int ret;
  653. ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
  654. GFP_KERNEL);
  655. if (!ptr)
  656. return -ENOMEM;
  657. ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
  658. chip, &d);
  659. if (ret < 0) {
  660. devres_free(ptr);
  661. return ret;
  662. }
  663. *ptr = d;
  664. devres_add(dev, ptr);
  665. *data = d;
  666. return 0;
  667. }
  668. EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
  669. /**
  670. * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
  671. *
  672. * @dev: Device for which which resource was allocated.
  673. * @irq: Primary IRQ for the device.
  674. * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
  675. *
  676. * A resource managed version of regmap_del_irq_chip().
  677. */
  678. void devm_regmap_del_irq_chip(struct device *dev, int irq,
  679. struct regmap_irq_chip_data *data)
  680. {
  681. int rc;
  682. WARN_ON(irq != data->irq);
  683. rc = devres_release(dev, devm_regmap_irq_chip_release,
  684. devm_regmap_irq_chip_match, data);
  685. if (rc != 0)
  686. WARN_ON(rc);
  687. }
  688. EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
  689. /**
  690. * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
  691. *
  692. * @data: regmap irq controller to operate on.
  693. *
  694. * Useful for drivers to request their own IRQs.
  695. */
  696. int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
  697. {
  698. WARN_ON(!data->irq_base);
  699. return data->irq_base;
  700. }
  701. EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
  702. /**
  703. * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
  704. *
  705. * @data: regmap irq controller to operate on.
  706. * @irq: index of the interrupt requested in the chip IRQs.
  707. *
  708. * Useful for drivers to request their own IRQs.
  709. */
  710. int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
  711. {
  712. /* Handle holes in the IRQ list */
  713. if (!data->chip->irqs[irq].mask)
  714. return -EINVAL;
  715. return irq_create_mapping(data->domain, irq);
  716. }
  717. EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
  718. /**
  719. * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
  720. *
  721. * @data: regmap_irq controller to operate on.
  722. *
  723. * Useful for drivers to request their own IRQs and for integration
  724. * with subsystems. For ease of integration NULL is accepted as a
  725. * domain, allowing devices to just call this even if no domain is
  726. * allocated.
  727. */
  728. struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
  729. {
  730. if (data)
  731. return data->domain;
  732. else
  733. return NULL;
  734. }
  735. EXPORT_SYMBOL_GPL(regmap_irq_get_domain);