
/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
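
/*
 * Example (illustrative sketch, hypothetical chip name): a platform setup
 * path typically pairs irq_set_chip() with a flow handler, either
 * separately or via the irq_set_chip_and_handler() wrapper:
 *
 *	irq_set_chip(irq, &my_pic_chip);
 *	irq_set_handler(irq, handle_level_irq);
 */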

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
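
/*
 * Example (illustrative sketch): configure a line for falling-edge
 * triggering before requesting it:
 *
 *	ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
 *	if (ret)
 *		return ret;
 */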

/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the irq handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
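
/*
 * Example (illustrative sketch, hypothetical struct and register names):
 * a chip implementation usually stashes its per-controller state via
 * irq_set_chip_data() and fetches it back in its callbacks with
 * irq_data_get_irq_chip_data():
 *
 *	static void my_pic_mask(struct irq_data *d)
 *	{
 *		struct my_pic *pic = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), pic->base + MY_PIC_MASK_SET);
 *	}
 */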

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either. Emit a warning, break the affinity
		 * and start it up as a normal interrupt.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_NORMAL;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		irqd_set_managed_shutdown(d);
		return IRQ_STARTUP_ABORT;
	}
	return IRQ_STARTUP_MANAGED;
}
#else
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	irq_domain_activate_irq(d);
	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			ret = __irq_startup(desc);
			irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			ret = __irq_startup(desc);
			irq_set_affinity_locked(d, aff, false);
			break;
		case IRQ_STARTUP_ABORT:
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
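
/*
 * Example (illustrative sketch): a driver opting out of the lazy disable
 * for one line, so that disable_irq() masks at the hardware level right
 * away:
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *	...
 *	disable_irq(irq);
 */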

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
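
/*
 * Example (illustrative sketch, hypothetical names): the threaded demux
 * handler of an I2C GPIO expander forwarding to its child interrupts:
 *
 *	static irqreturn_t my_expander_thread(int irq, void *dev_id)
 *	{
 *		struct my_expander *exp = dev_id;
 *		unsigned long pending = my_expander_read_status(exp);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, exp->ngpio)
 *			handle_nested_irq(irq_find_mapping(exp->domain, bit));
 *		return IRQ_HANDLED;
 *	}
 */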

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/**
 *	handle_untracked_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Untracked interrupts are sent from a demultiplexing interrupt
 *	handler when the demultiplexer does not know which device in its
 *	multiplexed irq domain generated the interrupt. IRQs handled
 *	through here are not subjected to stats tracking, randomness, or
 *	spurious interrupt detection.
 *
 *	Note: Like handle_simple_irq, the caller is expected to handle
 *	the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available, keep it masked and
	 * get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
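
/*
 * Example (illustrative sketch, hypothetical chip): wiring a
 * level-triggered line up from an irq_domain .map() callback:
 *
 *	irq_set_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
 */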

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available, then mask it and
	 * get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupts occur on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available, then mask it and
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Similar to the above handle_edge_irq, but using eoi and w/o the
 *	mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available, mark it pending
	 * and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
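
/*
 * Example (illustrative sketch, hypothetical names): a GPIO controller
 * hooking its demultiplexing flow handler onto the parent interrupt:
 *
 *	irq_set_chained_handler_and_data(parent_irq, my_gpio_irq_handler,
 *					 my_gpio);
 */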

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
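
/*
 * Example (illustrative sketch): keep a line from being auto-enabled on
 * request and exclude it from auto probing; irq_set_status_flags() and
 * irq_clear_status_flags() are thin wrappers around this function:
 *
 *	irq_modify_status(irq, 0, IRQ_NOAUTOEN | IRQ_NOPROBE);
 */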

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
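
/*
 * Example (illustrative sketch, hypothetical chip): a hierarchical chip
 * commonly delegates most operations straight to its parent:
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name			= "my-msi",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */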

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function
 * has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}
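
/*
 * Usage note (hedged sketch): the irq core is expected to pair these calls
 * around request and free of an interrupt, roughly:
 *
 *	irq_chip_pm_get(&desc->irq_data);	before setting up the irq
 *	...
 *	irq_chip_pm_put(&desc->irq_data);	after freeing the last action
 */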