/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
        zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpulist_parse(str, irq_default_affinity);
        /*
         * Set at least the boot CPU. We don't want to end up with
         * bug reports caused by random command line masks.
         */
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
        return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
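
/*
 * Illustrative usage (not part of this file): the parameter accepts the
 * usual cpulist syntax on the kernel command line, e.g.
 *
 *      irqaffinity=0-3,8
 *
 * which restricts the default affinity of newly allocated descriptors to
 * CPUs 0-3 and 8. The boot CPU is always added back, per the code above.
 */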

static void __init init_irq_default_affinity(void)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
        if (!irq_default_affinity)
                zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
#endif
        if (cpumask_empty(irq_default_affinity))
                cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
                                     GFP_KERNEL, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
                                     GFP_KERNEL, node)) {
                free_cpumask_var(desc->irq_common_data.affinity);
                return -ENOMEM;
        }
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
                free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
                free_cpumask_var(desc->irq_common_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
                          const struct cpumask *affinity)
{
        if (!affinity)
                affinity = irq_default_affinity;
        cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
        desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
                              const struct cpumask *affinity, struct module *owner)
{
        int cpu;

        desc->irq_common_data.handler_data = NULL;
        desc->irq_common_data.msi_desc = NULL;

        desc->irq_data.common = &desc->irq_common_data;
        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
        irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->name = NULL;
        desc->owner = owner;
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
        desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        int cpu, irq = desc->irq_data.irq;
        ssize_t ret = 0;
        char *p = "";

        for_each_possible_cpu(cpu) {
                unsigned int c = kstat_irqs_cpu(irq, cpu);

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
                p = ",";
        }

        ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
        return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
                              struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.chip && desc->irq_data.chip->name) {
                ret = scnprintf(buf, PAGE_SIZE, "%s\n",
                                desc->irq_data.chip->name);
        }
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.domain)
                ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        ret = sprintf(buf, "%s\n",
                      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(type);

static ssize_t name_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->name)
                ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        struct irqaction *action;
        ssize_t ret = 0;
        char *p = "";

        raw_spin_lock_irq(&desc->lock);
        for (action = desc->action; action != NULL; action = action->next) {
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
                                 p, action->name);
                p = ",";
        }
        raw_spin_unlock_irq(&desc->lock);

        if (ret)
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

        return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
        &per_cpu_count_attr.attr,
        &chip_name_attr.attr,
        &hwirq_attr.attr,
        &type_attr.attr,
        &name_attr.attr,
        &actions_attr.attr,
        NULL
};

static struct kobj_type irq_kobj_type = {
        .release        = irq_kobj_release,
        .sysfs_ops      = &kobj_sysfs_ops,
        .default_attrs  = irq_attrs,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
        if (irq_kobj_base) {
                /*
                 * Continue even in case of failure as this is nothing
                 * crucial.
                 */
                if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
                        pr_warn("Failed to add kobject for irq %d\n", irq);
        }
}

static int __init irq_sysfs_init(void)
{
        struct irq_desc *desc;
        int irq;

        /* Prevent concurrent irq alloc/free */
        irq_lock_sparse();

        irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
        if (!irq_kobj_base) {
                irq_unlock_sparse();
                return -ENOMEM;
        }

        /* Add the already allocated interrupts */
        for_each_irq_desc(irq, desc)
                irq_sysfs_add(irq, desc);
        irq_unlock_sparse();

        return 0;
}
postcore_initcall(irq_sysfs_init);
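
/*
 * Illustrative usage (not part of this file): each allocated interrupt gets
 * a directory under /sys/kernel/irq/, so from a shell one might inspect:
 *
 *      $ cat /sys/kernel/irq/18/chip_name
 *      IO-APIC
 *      $ cat /sys/kernel/irq/18/actions
 *      i801_smbus
 *
 * The attribute files correspond one to one to the *_show() callbacks above;
 * the irq number and output values here are hypothetical.
 */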

#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
        .release        = irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
        mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
        mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
                                   const struct cpumask *affinity,
                                   struct module *owner)
{
        struct irq_desc *desc;

        desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
        if (!desc)
                return NULL;
        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = alloc_percpu(unsigned int);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        mutex_init(&desc->request_mutex);
        init_rcu_head(&desc->rcu);

        desc_set_defaults(irq, desc, node, affinity, owner);
        irqd_set(&desc->irq_data, flags);
        kobject_init(&desc->kobj, &irq_kobj_type);

        return desc;

err_kstat:
        free_percpu(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

        free_masks(desc);
        free_percpu(desc->kstat_irqs);
        kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
        struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

        kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        irq_remove_debugfs_entry(desc);
        unregister_irq_proc(irq, desc);

        /*
         * sparse_irq_lock protects also show_interrupts() and
         * kstat_irqs_usr(). Once we deleted the descriptor from the
         * sparse tree we can free it. Access in proc will fail to
         * lookup the descriptor.
         *
         * The sysfs entry must be serialized against a concurrent
         * irq_sysfs_init() as well.
         */
        mutex_lock(&sparse_irq_lock);
        kobject_del(&desc->kobj);
        delete_irq_desc(irq);
        mutex_unlock(&sparse_irq_lock);

        /*
         * We free the descriptor, masks and stat fields via RCU. That
         * allows demultiplex interrupts to do rcu based management of
         * the child interrupts.
         */
        call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
                       const struct cpumask *affinity, struct module *owner)
{
        const struct cpumask *mask = NULL;
        struct irq_desc *desc;
        unsigned int flags;
        int i;

        /* Validate affinity mask(s) */
        if (affinity) {
                for (i = 0, mask = affinity; i < cnt; i++, mask++) {
                        if (cpumask_empty(mask))
                                return -EINVAL;
                }
        }

        flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
        mask = NULL;

        for (i = 0; i < cnt; i++) {
                if (affinity) {
                        node = cpu_to_node(cpumask_first(affinity));
                        mask = affinity;
                        affinity++;
                }
                desc = alloc_desc(start + i, node, flags, mask, owner);
                if (!desc)
                        goto err;
                mutex_lock(&sparse_irq_lock);
                irq_insert_desc(start + i, desc);
                irq_sysfs_add(start + i, desc);
                mutex_unlock(&sparse_irq_lock);
        }
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        if (nr > IRQ_BITMAP_BITS)
                return -ENOMEM;
        nr_irqs = nr;
        return 0;
}

int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the nr of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
               NR_IRQS, nr_irqs, initcnt);

        if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
                nr_irqs = IRQ_BITMAP_BITS;
        if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
                initcnt = IRQ_BITMAP_BITS;

        if (initcnt > nr_irqs)
                nr_irqs = initcnt;

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node, 0, NULL, NULL);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
                alloc_masks(&desc[i], node);
                raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                desc_set_defaults(i, &desc[i], node, NULL, NULL);
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
                              const struct cpumask *affinity,
                              struct module *owner)
{
        u32 i;

        for (i = 0; i < cnt; i++) {
                struct irq_desc *desc = irq_to_desc(start + i);

                desc->owner = owner;
        }
        return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
        mutex_lock(&sparse_irq_lock);
        bitmap_set(allocated_irqs, irq, 1);
        mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
        free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:        The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;
        generic_handle_irq_desc(desc);
        return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
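
/*
 * Illustrative usage (not part of this file): a chained demultiplex handler
 * for a hypothetical secondary interrupt controller might look like:
 *
 *      static void demux_handler(struct irq_desc *desc)
 *      {
 *              struct irq_chip *chip = irq_desc_get_chip(desc);
 *              unsigned long pending, bit;
 *
 *              chained_irq_enter(chip, desc);
 *              pending = read_pending_register();      // hypothetical helper
 *              for_each_set_bit(bit, &pending, 32)
 *                      generic_handle_irq(child_irq_base + bit);
 *              chained_irq_exit(chip, desc);
 *      }
 *
 * read_pending_register() and child_irq_base are made up for illustration;
 * chained_irq_enter()/chained_irq_exit() come from
 * <linux/irqchip/chained_irq.h>.
 */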

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:     The domain where to perform the lookup
 * @hwirq:      The HW irq number to convert to a logical one
 * @lookup:     Whether to perform the domain lookup or not
 * @regs:       Register file coming from the low-level handling code
 *
 * Returns:     0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
                        bool lookup, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq = hwirq;
        int ret = 0;

        irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
        if (lookup)
                irq = irq_find_mapping(domain, hwirq);
#endif

        /*
         * Some hardware gives randomly wrong interrupts. Rather
         * than crashing, do something sensible.
         */
        if (unlikely(!irq || irq >= nr_irqs)) {
                ack_bad_irq(irq);
                ret = -EINVAL;
        } else {
                generic_handle_irq(irq);
        }

        irq_exit();
        set_irq_regs(old_regs);
        return ret;
}
#endif
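
/*
 * Illustrative usage (not part of this file): architecture low-level entry
 * code typically reaches this via the handle_domain_irq() wrapper, e.g. in
 * a hypothetical root irqchip driver:
 *
 *      static void my_root_handler(struct pt_regs *regs)
 *      {
 *              u32 hwirq = read_irq_ack_register();    // hypothetical helper
 *
 *              handle_domain_irq(my_domain, hwirq, regs);
 *      }
 *
 * handle_domain_irq() is the lookup == true convenience wrapper declared in
 * <linux/irqdesc.h>; my_domain and read_irq_ack_register() are made up.
 */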

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:       Start of descriptor range
 * @cnt:        Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, from, cnt);
        mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:        Allocate for specific irq number if irq >= 0
 * @from:       Start the search from this irq number
 * @cnt:        Number of consecutive irqs to allocate.
 * @node:       Preferred node on which the irq descriptor should be allocated
 * @owner:      Owning module (can be NULL)
 * @affinity:   Optional pointer to an affinity mask array of size @cnt which
 *              hints where the irq descriptors should be allocated and which
 *              default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                  struct module *owner, const struct cpumask *affinity)
{
        int start, ret;

        if (!cnt)
                return -EINVAL;

        if (irq >= 0) {
                if (from > irq)
                        return -EINVAL;
                from = irq;
        } else {
                /*
                 * For interrupts which are freely allocated the
                 * architecture can force a lower bound to the @from
                 * argument. x86 uses this to exclude the GSI space.
                 */
                from = arch_dynirq_lower_bound(from);
        }

        mutex_lock(&sparse_irq_lock);

        start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
                                           from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto err;

        if (start + cnt > nr_irqs) {
                ret = irq_expand_nr_irqs(start + cnt);
                if (ret)
                        goto err;
        }

        bitmap_set(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return alloc_descs(start, cnt, node, affinity, owner);

err:
        mutex_unlock(&sparse_irq_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
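
/*
 * Illustrative usage (not part of this file): callers normally go through
 * the irq_alloc_descs() convenience macro from <linux/irq.h>, which fills
 * in THIS_MODULE and a NULL affinity array:
 *
 *      int base = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *      if (base < 0)
 *              return base;    // -EINVAL, -EEXIST or -ENOMEM from above
 *      ...
 *      irq_free_descs(base, 4);
 *
 * This grabs four consecutive unused irq numbers, allocating the
 * descriptors on the current NUMA node.
 */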

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:        number of interrupts to allocate
 * @node:       node on which to allocate
 *
 * Returns an interrupt number > 0 or 0, if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
        int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);

        if (irq < 0)
                return 0;

        for (i = irq; cnt > 0; i++, cnt--) {
                if (arch_setup_hwirq(i, node))
                        goto err;
                irq_clear_status_flags(i, _IRQ_NOREQUEST);
        }
        return irq;

err:
        for (i--; i >= irq; i--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
        irq_free_descs(irq, cnt);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:       Free from irq number
 * @cnt:        number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
        int i, j;

        for (i = from, j = cnt; j > 0; i++, j--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
        irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:     where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}
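
/*
 * Illustrative note (not part of this file): this is the worker behind the
 * for_each_active_irq() iterator from <linux/irqnr.h>, which expands
 * roughly to:
 *
 *      for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *           irq = irq_get_next_irq(irq + 1))
 *              ...
 */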

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
                    unsigned int check)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                if (check & _IRQ_DESC_CHECK) {
                        if ((check & _IRQ_DESC_PERCPU) &&
                            !irq_settings_is_per_cpu_devid(desc))
                                return NULL;

                        if (!(check & _IRQ_DESC_PERCPU) &&
                            irq_settings_is_per_cpu_devid(desc))
                                return NULL;
                }

                if (bus)
                        chip_bus_lock(desc);
                raw_spin_lock_irqsave(&desc->lock, *flags);
        }
        return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (bus)
                chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid_partition(unsigned int irq,
                                   const struct cpumask *affinity)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;

        if (desc->percpu_enabled)
                return -EINVAL;

        desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

        if (!desc->percpu_enabled)
                return -ENOMEM;

        if (affinity)
                desc->percpu_affinity = affinity;
        else
                desc->percpu_affinity = cpu_possible_mask;

        irq_set_percpu_devid_flags(irq);
        return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
        return irq_set_percpu_devid_partition(irq, NULL);
}
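
/*
 * Illustrative usage (not part of this file): per-CPU interrupts, e.g. for
 * a hypothetical per-CPU timer, are marked this way before being requested
 * with the per-CPU API from <linux/interrupt.h>:
 *
 *      static DEFINE_PER_CPU(struct my_timer, my_timers);      // hypothetical
 *
 *      irq_set_percpu_devid(irq);
 *      err = request_percpu_irq(irq, my_timer_handler, "my_timer",
 *                               &my_timers);
 *
 * The timer structure, handler and names are made up for illustration.
 */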

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !desc->percpu_enabled)
                return -EINVAL;

        if (affinity)
                cpumask_copy(affinity, desc->percpu_affinity);

        return 0;
}

void kstat_incr_irq_this_cpu(unsigned int irq)
{
        kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:        The interrupt number
 * @cpu:        The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->kstat_irqs ?
                        *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:        The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int cpu;
        unsigned int sum = 0;

        if (!desc || !desc->kstat_irqs)
                return 0;
        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
        return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:        The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
        unsigned int sum;

        irq_lock_sparse();
        sum = kstat_irqs(irq);
        irq_unlock_sparse();
        return sum;
}
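
/*
 * Illustrative note (not part of this file): kstat_irqs_usr() is meant for
 * process-context readers such as the per-irq totals in /proc/stat, since
 * taking the sparse lock protects them against a concurrent free_desc():
 *
 *      unsigned int count = kstat_irqs_usr(irq);       // safe when preemptible
 *
 * whereas kstat_irqs()/kstat_irqs_cpu() require the caller to otherwise
 * guarantee that the descriptor cannot go away, as the kerneldoc above says.
 */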