
/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
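
/*
 * Usage sketch (illustrative, not part of the original file): a
 * hypothetical driver quiescing its interrupt before tearing down
 * shared state. struct my_dev and its ->irq field are assumptions
 * made up for this example.
 *
 *	static void my_dev_quiesce(struct my_dev *dev)
 *	{
 *		disable_irq_nosync(dev->irq);	// stop new invocations
 *		synchronize_irq(dev->irq);	// wait for hard and threaded handlers
 *		// dev state may now be torn down safely
 *	}
 *
 * Do not call synchronize_irq() while holding a lock the handler also
 * takes, or both sides deadlock.
 */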
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static int __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;
	return 1;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
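
/*
 * Usage sketch (illustrative, not from this file): a multi-queue
 * driver hinting one interrupt per online CPU. The nvec and
 * msix_entries names are assumptions made up for the example.
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(msix_entries[i].vector,
 *				      cpumask_of(i % num_online_cpus()));
 *
 * Pass NULL before freeing the interrupt so no stale hint remains:
 *
 *	irq_set_affinity_hint(irq, NULL);
 */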
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *	    notification. Function pointers must be initialised;
 *	    the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
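
/*
 * Usage sketch (illustrative, assumptions marked): a driver watching
 * affinity changes to resteer its queues. my_notify(), my_release()
 * and struct my_dev are made up for this example.
 *
 *	static void my_notify(struct irq_affinity_notify *notify,
 *			      const cpumask_t *mask)
 *	{
 *		// re-point queue/DMA resources at the new CPU set
 *	}
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		// drop the reference taken for the work item
 *	}
 *
 *	dev->notify.notify = my_notify;
 *	dev->notify.release = my_release;
 *	irq_set_affinity_notifier(dev->irq, &dev->notify);
 *	...
 *	irq_set_affinity_notifier(dev->irq, NULL);	// before free_irq()
 */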
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = irq_desc_get_node(desc);

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq_desc_get_irq(d));
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	chip = irq_data_get_irq_chip(data);
	if (chip && chip->irq_set_vcpu_affinity)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
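
/*
 * Usage sketch (illustrative): disables nest, so each disable_irq()
 * needs a matching enable_irq(). dev->irq is an assumption made up
 * for this example.
 *
 *	disable_irq(dev->irq);		// waits for running handlers
 *	// the handler cannot run concurrently with this region
 *	enable_irq(dev->irq);
 *
 * Never call disable_irq() while holding a lock the handler also
 * takes; use disable_irq_nosync() from such contexts instead.
 */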
/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
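
/*
 * Usage sketch (illustrative): optimistic disable from atomic context.
 * The return value must be checked because a threaded handler may
 * still be running. dev->irq is an assumption for the example.
 *
 *	if (disable_hardirq(dev->irq)) {
 *		// hardirq quiesced and no threaded handler active;
 *		// safe to poll the device directly
 *	} else {
 *		// a threaded handler is still running; back off
 *	}
 *	enable_irq(dev->irq);
 */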
void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/*
	 * Wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
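
/*
 * Usage sketch (illustrative): a driver arming its interrupt as a
 * wakeup source across suspend. Calls must balance, like the
 * enable/disable depth. dev->irq and the suspend hooks are
 * assumptions made up for the example.
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(dev->irq, 1);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(dev->irq, 0);
 *		return 0;
 *	}
 */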
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);

		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect against
	 * the following scenario:
	 *
	 * The thread finishes before the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and mask the line, leave due
	 * to IRQS_INPROGRESS, and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (desc->irq_common_data.affinity)
		cpumask_copy(mask, desc->irq_common_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq: Interrupt line
 * @dev_id: Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
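
/*
 * Usage sketch (illustrative): kicking the threaded handler from a
 * path other than the primary handler, e.g. a poll timer that spotted
 * work the hardware failed to signal. struct my_dev and the my_*
 * helpers are assumptions made up for this example.
 *
 *	static void my_poll_timer(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *
 *		if (my_device_has_work(dev))
 *			irq_wake_thread(dev->irq, dev);	// dev_id must match request
 *	}
 */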
static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler != irq_default_primary_handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}

	if (!shared) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_mask;
		}

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge, polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warning("irq %d uses trigger mode %u; requested %u\n",
				   irq, nmsk, omsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
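
/*
 * Usage sketch (illustrative): architecture code registering a boot
 * time interrupt with a static irqaction, before the allocator and
 * request_irq() are usable. The names are assumptions modeled on
 * typical timer setup code, not taken from this file.
 *
 *	static struct irqaction my_timer_irqaction = {
 *		.handler = my_timer_interrupt,
 *		.flags	 = IRQF_TIMER,
 *		.name	 = "timer",
 *	};
 *
 *	setup_irq(MY_TIMER_IRQ, &my_timer_irqaction);
 */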
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		irq_shutdown(desc);
		irq_release_resources(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	kfree(__free_irq(irq, dev_id));
}
EXPORT_SYMBOL(free_irq);
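
/*
 * Usage sketch (illustrative): typical teardown order for a shared,
 * hinted interrupt. dev->irq and the hint/notifier setup are carried
 * over from the hypothetical examples above.
 *
 *	irq_set_affinity_notifier(dev->irq, NULL);
 *	irq_set_affinity_hint(dev->irq, NULL);
 *	free_irq(dev->irq, dev);	// same dev_id as passed to request_irq()
 */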

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts.
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed.
 *	@thread_fn: Function called from the irq handler thread.
 *		    If NULL, no irq thread is created.
 *	@irqflags: Interrupt type flags
 *	@devname: An ASCII name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	@dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non-NULL @dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval) {
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
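
/*
 * Usage sketch (editor's illustration): the split-handler design
 * described above, reusing the hypothetical "struct my_dev" from the
 * free_irq() sketch.  Register offsets and semantics are made up.
 */
static irqreturn_t my_quick_check(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	if (!(readl(dev->regs + 0x00) & 0x1))	/* hypothetical status bit */
		return IRQ_NONE;		/* shared line: not our device */

	writel(0x0, dev->regs + 0x04);		/* quiesce the device ... */
	return IRQ_WAKE_THREAD;			/* ... and defer the real work */
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	/* Runs in a kernel thread: sleeping (e.g. bus access) is fine here. */
	writel(0x1, dev->regs + 0x04);		/* re-enable when done */
	return IRQ_HANDLED;
}

static int my_request(struct my_dev *dev)
{
	return request_threaded_irq(dev->irq, my_quick_check, my_thread_fn,
				    IRQF_SHARED, "my-dev", dev);
}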

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ASCII name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
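
/*
 * Usage sketch (editor's illustration): a driver that may sit behind a
 * nested irqchip (e.g. a GPIO expander on a sleeping bus) lets the core
 * pick the context.  Reuses the hypothetical "struct my_dev" above;
 * my_any_handler() is likewise made up.
 */
static irqreturn_t my_any_handler(int irq, void *dev_id)
{
	/* May run in hardirq *or* thread context -- assume neither. */
	return IRQ_HANDLED;
}

static int my_request_any(struct my_dev *dev)
{
	int ret;

	ret = request_any_context_irq(dev->irq, my_any_handler, 0,
				      "my-dev", dev);
	if (ret < 0)
		return ret;	/* plain error code */

	/* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED -- both mean success. */
	return 0;
}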

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);
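
/*
 * Usage sketch (editor's illustration): enable_percpu_irq() only acts
 * on the calling CPU, so a typical owner runs it everywhere via
 * on_each_cpu().  The my_*() names are hypothetical; IRQ_TYPE_NONE
 * keeps whatever trigger type is already configured.
 */
static void my_enable_local(void *info)
{
	enable_percpu_irq(*(unsigned int *)info, IRQ_TYPE_NONE);
}

static void my_enable_everywhere(unsigned int irq)
{
	on_each_cpu(my_enable_local, &irq, 1);	/* wait for all CPUs */
}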

/**
 *	irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 *	@irq: Linux irq number to check for
 *
 *	Must be called from a non-migratable context. Returns the enable
 *	state of a per cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
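
/*
 * Usage sketch (editor's illustration): the result is only meaningful
 * for the CPU the call runs on, so pin the task with get_cpu()/put_cpu()
 * around it.  my_check_local_irq() is a hypothetical helper.
 */
static bool my_check_local_irq(unsigned int irq)
{
	bool enabled;

	get_cpu();			/* disables preemption/migration */
	enabled = irq_percpu_is_enabled(irq);
	put_cpu();

	return enabled;
}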

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 *	Used to remove interrupts statically set up by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);
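
/*
 * Usage sketch (editor's illustration): every CPU must disable the line
 * before it is freed.  "struct my_pmu" and the my_*() helpers are
 * hypothetical; the cookie handed to free_percpu_irq() is the same
 * per-cpu address that was given to request_percpu_irq().
 */
struct my_pmu {
	int counter;			/* hypothetical per-cpu state */
};

static DEFINE_PER_CPU(struct my_pmu, my_pmu);

static void my_disable_local(void *info)
{
	disable_percpu_irq(*(unsigned int *)info);
}

static void my_pmu_teardown(unsigned int irq)
{
	on_each_cpu(my_disable_local, &irq, 1);	/* run on every CPU, wait */
	free_percpu_irq(irq, &my_pmu);		/* same per-cpu cookie */
}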

/**
 *	setup_percpu_irq - set up a per-cpu interrupt
 *	@irq: Interrupt line to set up
 *	@act: irqaction for the interrupt
 *
 *	Used to statically set up per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
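
/*
 * Usage sketch (editor's illustration): the static-irqaction analogue
 * of request_percpu_irq() below, as early boot code would use it.
 * my_clk_interrupt() and MY_PPI_IRQ are hypothetical; the flags mirror
 * what request_percpu_irq() sets, and the per-cpu cookie reuses my_pmu
 * from the sketch above.
 */
static irqreturn_t my_clk_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* dev_id is this CPU's cookie instance */
}

static struct irqaction my_clk_action = {
	.handler	= my_clk_interrupt,
	.flags		= IRQF_PERCPU | IRQF_NO_SUSPEND,
	.name		= "my-clk",
	.percpu_dev_id	= &my_pmu,
};

/* At early boot: setup_percpu_irq(MY_PPI_IRQ, &my_clk_action); */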

/**
 *	request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@devname: An ASCII name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt on the local CPU. If the interrupt is supposed to be
 *	enabled on other CPUs, it has to be done on each CPU using
 *	enable_percpu_irq().
 *
 *	@dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}
EXPORT_SYMBOL_GPL(request_percpu_irq);
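
/*
 * Usage sketch (editor's illustration): "struct my_tick" and the my_*()
 * names are hypothetical.  The cookie is a per-cpu variable and, as
 * documented above, the handler is invoked with the interrupted CPU's
 * instance of it.
 */
struct my_tick {
	u64 count;
};

static DEFINE_PER_CPU(struct my_tick, my_tick);

static irqreturn_t my_tick_handler(int irq, void *dev_id)
{
	struct my_tick *t = dev_id;	/* already this CPU's instance */

	t->count++;
	return IRQ_HANDLED;
}

static int my_tick_init(unsigned int irq)
{
	/* Other CPUs still need enable_percpu_irq() run locally. */
	return request_percpu_irq(irq, my_tick_handler, "my-tick",
				  &my_tick);
}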

/**
 *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: One of IRQCHIP_STATE_* the caller wants to know about
 *	@state: a pointer to a boolean where the state is to be stored
 *
 *	This call snapshots the internal irqchip state of an
 *	interrupt, returning into @state the bit corresponding to
 *	state @which.
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
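
/*
 * Usage sketch (editor's illustration): a VFIO/KVM-style caller checks
 * whether a forwarded interrupt is still pending at the irqchip.
 * my_irq_pending() is hypothetical; preemption is disabled in case the
 * chip uses per-cpu registers.
 */
static bool my_irq_pending(unsigned int irq)
{
	bool pending = false;

	preempt_disable();
	/* On error, 'pending' keeps its initialised false value. */
	irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
	preempt_enable();

	return pending;
}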

/**
 *	irq_set_irqchip_state - set the state of a forwarded interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: State to be restored (one of IRQCHIP_STATE_*)
 *	@val: Value corresponding to @which
 *
 *	This call sets the internal irqchip state of an interrupt,
 *	depending on the value of @which.
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
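
/*
 * Usage sketch (editor's illustration): re-injecting a previously saved
 * pending bit when a VM is resumed, mirroring the getter above.
 * my_irq_restore_pending() is a hypothetical helper.
 */
static int my_irq_restore_pending(unsigned int irq, bool was_pending)
{
	int err;

	preempt_disable();
	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
				    was_pending);
	preempt_enable();

	return err;
}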