/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
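
/*
 * Editor's usage sketch (not part of the original file): a hypothetical
 * irqchip driver installing its chip for a freshly allocated interrupt.
 * "foo_chip", "foo_mask", "foo_unmask" and "foo_irq" are illustrative
 * names, not APIs defined here.
 *
 *	static struct irq_chip foo_chip = {
 *		.name		= "foo",
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *	};
 *
 *	if (irq_set_chip(foo_irq, &foo_chip))
 *		pr_err("foo: invalid irq %u\n", foo_irq);
 */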
/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq: irq number
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
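
/*
 * Editor's usage sketch (assumption, not from this file): configuring a
 * line for falling-edge triggering. The IRQ_TYPE_* constants come from
 * include/linux/irq.h; "foo_irq" is an illustrative name.
 *
 *	int ret = irq_set_irq_type(foo_irq, IRQ_TYPE_EDGE_FALLING);
 *	if (ret)
 *		pr_err("foo: cannot set trigger type: %d\n", ret);
 */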
/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq: Interrupt number
 * @data: Pointer to interrupt specific data
 *
 * Set the handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base: Interrupt number base
 * @irq_offset: Interrupt number offset
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq: Interrupt number
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq: Interrupt number
 * @data: Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
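
/*
 * Editor's usage sketch: pairing irq_set_chip_data() with the accessor
 * typically used from irq_chip callbacks. "struct foo", "foo->base" and
 * "FOO_MASK_REG" are illustrative driver-side names.
 *
 *	irq_set_chip_data(foo_irq, foo);
 *
 *	static void foo_mask(struct irq_data *d)
 *	{
 *		struct foo *foo = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), foo->base + FOO_MASK_REG);
 *	}
 */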
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either. Emit a warning, break the affinity
		 * and start it up as a normal interrupt.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_NORMAL;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		irqd_set_managed_shutdown(d);
		return IRQ_STARTUP_ABORT;
	}
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	irq_domain_activate_irq(d);
	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			ret = __irq_startup(desc);
			irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			ret = __irq_startup(desc);
			irq_set_affinity_locked(d, aff, false);
			break;
		case IRQ_STARTUP_ABORT:
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc: irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
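
/*
 * Editor's sketch of the opt-out described above: a driver that cannot
 * tolerate lazily disabled interrupts forces an immediate hardware mask.
 * "foo_irq" is an illustrative name.
 *
 *	irq_set_status_flags(foo_irq, IRQ_DISABLE_UNLAZY);
 *	disable_irq(foo_irq);	which now masks at the chip right away
 */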
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
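
/*
 * Editor's usage sketch: the threaded handler of an i2c irq-expander
 * style parent device demultiplexing to nested child interrupts. The
 * lookup via irq_find_mapping() assumes the driver created an irqdomain;
 * "struct foo" and foo_read_pending() are illustrative names.
 *
 *	static irqreturn_t foo_parent_thread(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *		unsigned long pending = foo_read_pending(foo);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, foo->nr_irqs)
 *			handle_nested_irq(irq_find_mapping(foo->domain, bit));
 *		return IRQ_HANDLED;
 *	}
 */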
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc: the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle
 * the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
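
/*
 * Editor's usage sketch: flow handlers are normally installed together
 * with the chip, e.g. from an irqdomain ->map() callback (illustrative
 * names; a sketch, not this file's API surface):
 *
 *	static int foo_domain_map(struct irq_domain *d, unsigned int irq,
 *				  irq_hw_number_t hwirq)
 *	{
 *		irq_set_chip_and_handler(irq, &foo_chip, handle_level_irq);
 *		irq_set_chip_data(irq, d->host_data);
 *		return 0;
 *	}
 */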
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc: the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires reenabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
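
/*
 * Editor's usage sketch: chips that support both level and edge triggers
 * commonly switch between the two flow handlers from their
 * ->irq_set_type() callback (illustrative names):
 *
 *	static int foo_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		if (type & IRQ_TYPE_LEVEL_MASK)
 *			irq_set_handler_locked(d, handle_level_irq);
 *		else
 *			irq_set_handler_locked(d, handle_edge_irq);
 *		return 0;
 *	}
 */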
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc: the interrupt description structure for this irq
 *
 * Similar to the above handle_edge_irq, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available then mark it
	 * pending and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
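
/*
 * Editor's usage sketch: per-CPU-devid interrupts are requested with
 * request_percpu_irq(), passing a percpu pointer which the handler above
 * dereferences for the local CPU (illustrative names):
 *
 *	static DEFINE_PER_CPU(struct foo_cpu, foo_cpu_data);
 *
 *	err = request_percpu_irq(foo_irq, foo_handler, "foo",
 *				 &foo_cpu_data);
 *	if (!err)
 *		enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
 */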
static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
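
/*
 * Editor's usage sketch: a chained flow handler for a secondary
 * (demultiplexing) controller. chained_irq_enter()/chained_irq_exit()
 * come from include/linux/irqchip/chained_irq.h; "struct foo" and
 * foo_read_pending() are illustrative names.
 *
 *	static void foo_demux_handler(struct irq_desc *desc)
 *	{
 *		struct foo *foo = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = foo_read_pending(foo);
 *		for_each_set_bit(bit, &pending, foo->nr_irqs)
 *			generic_handle_irq(irq_find_mapping(foo->domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, foo_demux_handler, foo);
 */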
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	trigger = irqd_get_trigger_type(&desc->irq_data);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	tmp = irq_settings_get_trigger_mask(desc);
	if (tmp != IRQ_TYPE_NONE)
		trigger = tmp;

	irqd_set(&desc->irq_data, trigger);

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
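
/*
 * Editor's usage sketch: the clr/set masks take the IRQ_* settings bits
 * from include/linux/irq.h, e.g. keeping a line masked until the driver
 * explicitly enables it:
 *
 *	irq_modify_status(foo_irq, 0, IRQ_NOAUTOEN);
 *
 * or re-allowing request/probe on a line that was previously reserved:
 *
 *	irq_modify_status(foo_irq, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
 */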
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 * handle_fasteoi_ack_irq - irq handler for edge hierarchy
 * stacked on transparent controllers
 *
 * @desc: the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_ack() function
 * called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 * handle_fasteoi_mask_irq - irq handler for level hierarchy
 * stacked on transparent controllers
 *
 * @desc: the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_mask_ack() function
 * called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data: Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data: Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @dest: The affinity mask to set
 * @force: Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data: Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @vcpu_info: The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @on: Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data: Pointer to interrupt specific data
 * @msg: Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For
 * non-hierarchical domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data: Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data: Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this
 * function has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}
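
/*
 * Editor's note with a sketch: in-tree, the irq core pairs these calls
 * itself (get on setup, put on teardown); conceptually the contract is:
 *
 *	ret = irq_chip_pm_get(&desc->irq_data);
 *	if (ret < 0)
 *		return ret;
 *	... interrupt is in use ...
 *	irq_chip_pm_put(&desc->irq_data);
 */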