regmap-irq.c

/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
	struct mutex lock;
	struct irq_chip irq_chip;

	struct regmap *map;
	const struct regmap_irq_chip *chip;

	int irq_base;
	struct irq_domain *domain;

	int irq;
	int wake_count;

	void *status_reg_buf;
	unsigned int *status_buf;
	unsigned int *mask_buf;
	unsigned int *mask_buf_def;
	unsigned int *wake_buf;
	unsigned int *type_buf;
	unsigned int *type_buf_def;

	unsigned int irq_reg_stride;
	unsigned int type_reg_stride;
};
static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}

static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
				  unsigned int reg, unsigned int mask,
				  unsigned int val)
{
	if (d->chip->mask_writeonly)
		return regmap_write_bits(d->map, reg, mask, val);
	else
		return regmap_update_bits(d->map, reg, mask, val);
}
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware. We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally: a masked
		 * interrupt that is left unacked will be ignored by the
		 * IRQ handler and may then cause an interrupt storm.
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by writing 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < d->chip->num_type_reg; i++) {
		if (!d->type_buf_def[i])
			continue;
		reg = d->chip->type_base +
			(i * map->reg_stride * d->type_reg_stride);
		if (d->chip->type_invert)
			ret = regmap_irq_update_bits(d, reg,
				d->type_buf_def[i], ~d->type_buf[i]);
		else
			ret = regmap_irq_update_bits(d, reg,
				d->type_buf_def[i], d->type_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync type in %x\n",
				reg);
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
/*
 * The irq_chip callbacks below run with the bus lock held and only
 * update the cached mask/wake/type buffers; the hardware is written
 * back in one batch from regmap_irq_sync_unlock() when the lock is
 * released.
 */
static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg = irq_data->type_reg_offset / map->reg_stride;

	if (!(irq_data->type_rising_mask | irq_data->type_falling_mask))
		return 0;

	d->type_buf[reg] &= ~(irq_data->type_falling_mask |
					irq_data->type_rising_mask);
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		d->type_buf[reg] |= irq_data->type_falling_mask;
		break;

	case IRQ_TYPE_EDGE_RISING:
		d->type_buf[reg] |= irq_data->type_rising_mask;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		d->type_buf[reg] |= (irq_data->type_falling_mask |
					irq_data->type_rising_mask);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
		d->wake_count++;
	} else {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}

static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			pm_runtime_put(map->dev);
			goto exit;
		}
	}

	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto exit;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
				goto exit;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt. We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

exit:
	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};
/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache. The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	if (chip->num_type_reg) {
		d->type_buf_def = kcalloc(chip->num_type_reg,
					  sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						0);
			else
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (chip->num_type_reg) {
		for (i = 0; i < chip->num_irqs; i++) {
			reg = chip->irqs[i].type_reg_offset / map->reg_stride;
			d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask |
					chip->irqs[i].type_falling_mask;
		}
		for (i = 0; i < chip->num_type_reg; ++i) {
			if (!d->type_buf_def[i])
				continue;
			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], 0xFF);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], 0x0);
			if (ret != 0) {
				dev_err(map->dev,
					"Failed to set type in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
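
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * MFD driver describes its interrupts with a static regmap_irq_chip
 * and registers it from probe(). All names prefixed "mychip_"/"MYCHIP_"
 * below are hypothetical.
 *
 *	static const struct regmap_irq mychip_irqs[] = {
 *		REGMAP_IRQ_REG(0, 0, BIT(0)),	// hwirq 0: reg 0, bit 0
 *		REGMAP_IRQ_REG(1, 0, BIT(1)),	// hwirq 1: reg 0, bit 1
 *	};
 *
 *	static const struct regmap_irq_chip mychip_irq_chip = {
 *		.name = "mychip",
 *		.status_base = MYCHIP_REG_INT_STATUS,
 *		.mask_base = MYCHIP_REG_INT_MASK,
 *		.ack_base = MYCHIP_REG_INT_STATUS,
 *		.num_regs = 1,
 *		.irqs = mychip_irqs,
 *		.num_irqs = ARRAY_SIZE(mychip_irqs),
 *	};
 *
 *	ret = regmap_add_irq_chip(mychip->regmap, i2c->irq,
 *				  IRQF_TRIGGER_LOW, 0,
 *				  &mychip_irq_chip, &mychip->irq_data);
 *
 * The matching teardown is regmap_del_irq_chip(i2c->irq,
 * mychip->irq_data) in remove().
 */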
/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	free_irq(irq, d);

	/* Dispose of all virtual IRQs from the domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirqs that are holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual IRQ of the hwirq on the chip and, if
		 * there is one, dispose of it.
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}

static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}
	return *r == data;
}
/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
 *
 * @dev: The device pointer to which the irq_chip belongs.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **ptr, *d;
	int ret;

	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
				  chip, &d);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = d;
	devres_add(dev, ptr);
	*data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
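
/*
 * Usage sketch (illustrative only): with the devm_ variant the chip is
 * torn down automatically on driver unbind, so probe() needs no matching
 * cleanup. "mychip_irq_chip" is the hypothetical descriptor from the
 * earlier example.
 *
 *	ret = devm_regmap_add_irq_chip(&i2c->dev, mychip->regmap,
 *				       i2c->irq, IRQF_TRIGGER_LOW, 0,
 *				       &mychip_irq_chip,
 *				       &mychip->irq_data);
 *	if (ret)
 *		return ret;
 */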
/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which the resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	int rc;

	WARN_ON(irq != data->irq);
	rc = devres_release(dev, devm_regmap_irq_chip_release,
			    devm_regmap_irq_chip_match, data);

	if (rc != 0)
		WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
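
/*
 * Usage sketch (illustrative only): a child driver can translate a
 * chip-relative interrupt index into a Linux IRQ number and request it;
 * MYCHIP_IRQ_ALERT and mychip_alert_handler are hypothetical.
 *
 *	int virq = regmap_irq_get_virq(mychip->irq_data, MYCHIP_IRQ_ALERT);
 *	if (virq < 0)
 *		return virq;
 *	ret = devm_request_threaded_irq(dev, virq, NULL,
 *					mychip_alert_handler,
 *					IRQF_ONESHOT, "mychip-alert",
 *					mychip);
 */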
/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems. For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
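
/*
 * Usage sketch (illustrative only): an MFD core might expose the domain
 * so that child drivers can map their own hwirqs; MYCHIP_IRQ_GPIO is
 * hypothetical. The NULL check mirrors the tolerance described above.
 *
 *	struct irq_domain *domain = regmap_irq_get_domain(mychip->irq_data);
 *	unsigned int virq = domain ?
 *		irq_create_mapping(domain, MYCHIP_IRQ_GPIO) : 0;
 */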