/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc)
		__synchronize_hardirq(desc);
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
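
/*
 * Illustrative sketch (not part of manage.c): a typical driver teardown
 * step that relies on synchronize_irq(). The device, its register layout
 * and the names foo_dev/foo_stop() are hypothetical; only
 * synchronize_irq() comes from this subsystem.
 */
#if 0	/* example only */
static void foo_stop(struct foo_dev *foo)
{
	/* Tell the hardware to stop raising interrupts (device specific). */
	writel(0, foo->regs + FOO_IRQ_ENABLE);

	/*
	 * Wait until any handler instance still running on another CPU
	 * has returned before releasing data it may be touching.
	 */
	synchronize_irq(foo->irq);

	kfree(foo->rx_buf);
	foo->rx_buf = NULL;
}
#endif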

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(data->affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
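
/*
 * Illustrative sketch (not part of manage.c): a multi-queue driver that
 * publishes an affinity hint for each of its vectors so user space
 * (e.g. irqbalance) can see the preferred CPU. The structure foo_dev and
 * its nvec/irqs[]/hint_masks[] members are hypothetical.
 */
#if 0	/* example only */
static void foo_set_affinity_hints(struct foo_dev *foo)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);
	int i;

	for (i = 0; i < foo->nvec; i++) {
		cpumask_clear(&foo->hint_masks[i]);
		cpumask_set_cpu(cpu, &foo->hint_masks[i]);
		irq_set_affinity_hint(foo->irqs[i], &foo->hint_masks[i]);

		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
	}
}

/* The hints must be cleared again before the vectors are freed: */
static void foo_clear_affinity_hints(struct foo_dev *foo)
{
	int i;

	for (i = 0; i < foo->nvec; i++)
		irq_set_affinity_hint(foo->irqs[i], NULL);
}
#endif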

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *          notification. Function pointers must be initialised;
 *          the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
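
/*
 * Illustrative sketch (not part of manage.c): registering an affinity
 * notifier so a driver can re-point per-CPU resources when its interrupt
 * is moved. foo_dev and the way it embeds the notifier are hypothetical;
 * the callback signatures follow struct irq_affinity_notify in
 * <linux/interrupt.h>.
 */
#if 0	/* example only */
struct foo_dev {
	unsigned int irq;
	struct irq_affinity_notify affinity_notify;
	/* ... */
};

static void foo_irq_affinity_changed(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct foo_dev *foo = container_of(notify, struct foo_dev,
					   affinity_notify);

	/* Rebind queues, steering tables, etc. to the new mask. */
	(void)foo;
}

static void foo_irq_affinity_release(struct kref *ref)
{
	/* Nothing dynamically allocated here; the notifier is embedded. */
}

static int foo_register_notifier(struct foo_dev *foo)
{
	foo->affinity_notify.notify = foo_irq_affinity_changed;
	foo->affinity_notify.release = foo_irq_affinity_release;
	return irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
}

/* And before free_irq(): irq_set_affinity_notifier(foo->irq, NULL); */
#endif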

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
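
/*
 * Illustrative sketch (not part of manage.c): disable_irq()/enable_irq()
 * nest, so a driver can bracket a reconfiguration step. foo_dev and
 * foo_reload_ring() are hypothetical.
 */
#if 0	/* example only */
static void foo_reconfigure(struct foo_dev *foo)
{
	/* Waits for running handlers; must not be called from the handler. */
	disable_irq(foo->irq);

	foo_reload_ring(foo);		/* handler must not observe this */

	enable_irq(foo->irq);		/* matches the disable above */
}
#endif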

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
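
/*
 * Illustrative sketch (not part of manage.c): arming an interrupt as a
 * wakeup source across suspend. The calls must balance, mirroring the
 * wake_depth accounting above. foo_dev and the suspend/resume callbacks
 * are hypothetical.
 */
#if 0	/* example only */
static int foo_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(foo->irq, 1);	/* arm as wakeup source */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(foo->irq, 0);	/* must balance the enable */
	return 0;
}
#endif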

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (desc->irq_data.affinity)
		cpumask_copy(mask, desc->irq_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out the current thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq: Interrupt line
 * @dev_id: Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for (action = desc->action; action; action = action->next) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
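
/*
 * Illustrative sketch (not part of manage.c): irq_wake_thread() lets code
 * that already knows work is pending (for example a recovery or polling
 * path) kick the threaded handler registered for a given dev_id without
 * a new hardware interrupt. foo_dev is hypothetical.
 */
#if 0	/* example only */
static void foo_poll_timeout(struct foo_dev *foo)
{
	/* Behave as if the line fired, so the thread drains the device. */
	irq_wake_thread(foo->irq, foo);
}
#endif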

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc))
			irq_setup_forced_threading(new);
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;
		static const struct sched_param param = {
			.sched_priority = MAX_USER_RT_PRIO/2,
		};

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto out_mput;
		}

		sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
		/*
		 * Tell the thread to set its affinity. This is
		 * important for shared interrupt handlers as we do
		 * not invoke setup_affinity() for the secondary
		 * handlers as everything is already set up. Even for
		 * interrupts marked with IRQF_NO_BALANCE this is
		 * correct as we want the thread to move to the cpu(s)
		 * on which the requesting code placed the interrupt.
		 */
		set_bit(IRQTF_AFFINITY, &new->thread_flags);
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->thread_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}

	if (!shared) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_mask;
		}

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warning("irq %d uses trigger mode %u; requested %u\n",
				   irq, nmsk, omsk);
	}

	new->irq = irq;
	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
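
/*
 * Illustrative sketch (not part of manage.c): early boot code typically
 * feeds setup_irq() a statically allocated irqaction, since the slab
 * allocator may not be usable yet. TIMER_IRQ, the handler and the init
 * function are hypothetical placeholders for what real architecture code
 * would provide.
 */
#if 0	/* example only */
static irqreturn_t board_timer_interrupt(int irq, void *dev_id)
{
	/* Acknowledge the timer and run the periodic tick (arch specific). */
	return IRQ_HANDLED;
}

static struct irqaction board_timer_irqaction = {
	.handler = board_timer_interrupt,
	.flags	 = IRQF_TIMER,
	.name	 = "timer",
};

static void __init board_time_init(void)
{
	setup_irq(TIMER_IRQ, &board_timer_irqaction);
}
#endif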

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_shutdown(desc);
		irq_release_resources(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
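
/*
 * Illustrative sketch (not part of manage.c): the dev_id passed to
 * free_irq() must be the same cookie that was passed to request_irq(),
 * which is what selects the right action on a shared line. foo_dev and
 * foo_remove() are hypothetical.
 */
#if 0	/* example only */
static void foo_remove(struct foo_dev *foo)
{
	/* Quiesce the device first; on a shared IRQ this is mandatory. */
	writel(0, foo->regs + FOO_IRQ_ENABLE);

	/* Blocks until running handlers (hardirq and thread) are done. */
	free_irq(foo->irq, foo);
}
#endif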

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Primary handler for threaded interrupts.
 *           If NULL and thread_fn != NULL the default
 *           primary handler is installed
 * @thread_fn: Function called from the irq handler thread
 *             If NULL, no irq thread is created
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
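
/*
 * Illustrative sketch (not part of manage.c): the split handler design
 * the kernel-doc above describes. The primary handler runs in hard irq
 * context, checks that the interrupt is really ours, silences the device
 * and defers the slow work to the thread. foo_dev, its register layout
 * and foo_process_events() are hypothetical.
 */
#if 0	/* example only */
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;
	u32 status = readl(foo->regs + FOO_IRQ_STATUS);

	if (!status)
		return IRQ_NONE;		/* not ours (shared line) */

	/* Silence the device; the thread re-enables it when done. */
	writel(0, foo->regs + FOO_IRQ_ENABLE);
	foo->pending = status;

	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	foo_process_events(foo, foo->pending);	/* may sleep (e.g. I2C) */
	writel(FOO_IRQ_ALL, foo->regs + FOO_IRQ_ENABLE);

	return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_dev *foo)
{
	return request_threaded_irq(foo->irq, foo_hardirq, foo_thread,
				    IRQF_SHARED, "foo", foo);
}
#endif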

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
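
/*
 * Illustrative sketch (not part of manage.c): callers of
 * request_any_context_irq() must treat any non-negative return value as
 * success, because it returns IRQC_IS_HARDIRQ or IRQC_IS_NESTED rather
 * than 0. foo_dev and foo_handler are hypothetical.
 */
#if 0	/* example only */
static int foo_request(struct foo_dev *foo)
{
	int ret;

	ret = request_any_context_irq(foo->irq, foo_handler,
				      IRQF_TRIGGER_FALLING, "foo", foo);
	if (ret < 0)
		return ret;	/* only negative values are errors */

	foo->irq_is_nested = (ret == IRQC_IS_NESTED);
	return 0;
}
#endif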

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, irq, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but
 * the interrupt line is not disabled. This must be done on each
 * CPU before calling this function. The function does not return
 * until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}

/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}

/**
 * request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources, but doesn't
 * automatically enable the interrupt. It has to be done on each
 * CPU using enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}
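
/*
 * Illustrative sketch (not part of manage.c): a per-CPU interrupt (such
 * as a local timer) is requested once with a __percpu cookie and then
 * enabled on each CPU from that CPU, because enable_percpu_irq() acts on
 * the calling CPU only. foo_percpu_state, foo_local_handler and the use
 * of on_each_cpu() for the enable step are hypothetical simplifications;
 * real users typically do the enable from CPU startup/hotplug callbacks.
 */
#if 0	/* example only */
struct foo_percpu_state {
	unsigned long events;
};

static struct foo_percpu_state __percpu *foo_state;

static irqreturn_t foo_local_handler(int irq, void *dev_id)
{
	struct foo_percpu_state *st = dev_id;	/* this CPU's instance */

	st->events++;
	return IRQ_HANDLED;
}

static void foo_enable_on_this_cpu(void *info)
{
	enable_percpu_irq(*(unsigned int *)info, IRQ_TYPE_NONE);
}

static int foo_init(unsigned int irq)
{
	int ret;

	foo_state = alloc_percpu(struct foo_percpu_state);
	if (!foo_state)
		return -ENOMEM;

	ret = request_percpu_irq(irq, foo_local_handler, "foo_local",
				 foo_state);
	if (ret) {
		free_percpu(foo_state);
		return ret;
	}

	/* Run the enable step on every online CPU. */
	on_each_cpu(foo_enable_on_this_cpu, &irq, 1);
	return 0;
}
#endif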