irqdomain.c 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761
  1. // SPDX-License-Identifier: GPL-2.0
  2. #define pr_fmt(fmt) "irq: " fmt
  3. #include <linux/acpi.h>
  4. #include <linux/debugfs.h>
  5. #include <linux/hardirq.h>
  6. #include <linux/interrupt.h>
  7. #include <linux/irq.h>
  8. #include <linux/irqdesc.h>
  9. #include <linux/irqdomain.h>
  10. #include <linux/module.h>
  11. #include <linux/mutex.h>
  12. #include <linux/of.h>
  13. #include <linux/of_address.h>
  14. #include <linux/of_irq.h>
  15. #include <linux/topology.h>
  16. #include <linux/seq_file.h>
  17. #include <linux/slab.h>
  18. #include <linux/smp.h>
  19. #include <linux/fs.h>
/* All registered irq domains; both protected by irq_domain_mutex. */
static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

/* Fallback domain used when callers pass NULL; see irq_set_default_host(). */
static struct irq_domain *irq_default_domain;

static void irq_domain_check_hierarchy(struct irq_domain *domain);
/*
 * struct irqchip_fwid - Software-allocated firmware node for an irqchip
 * @fwnode: embedded fwnode_handle handed back to callers
 * @type:   IRQCHIP_FWNODE_* discriminator (see linux/irqdomain.h)
 * @name:   kasprintf()-allocated name, freed by irq_domain_free_fwnode()
 * @data:   opaque caller-provided payload
 */
struct irqchip_fwid {
	struct fwnode_handle fwnode;
	unsigned int type;
	char *name;
	void *data;
};
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void debugfs_add_domain_dir(struct irq_domain *d);
static void debugfs_remove_domain_dir(struct irq_domain *d);
#else
/* No-op stubs when irq debugfs support is compiled out. */
static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
#endif

/* Marks a fwnode as irqchip-allocated; tested by is_fwnode_irqchip(). */
const struct fwnode_operations irqchip_fwnode_ops;
EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
/**
 * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
 *                           identifying an irq domain
 * @type: Type of irqchip_fwnode. See linux/irqdomain.h
 * @name: Optional user provided domain name
 * @id:   Optional user provided id if name != NULL
 * @data: Optional user-provided data
 *
 * Allocate a struct irqchip_fwid, and return a pointer to the embedded
 * fwnode_handle (or NULL on failure).
 *
 * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
 * solely to transport name information to irqdomain creation code. The
 * node is not stored. For other types the pointer is kept in the irq
 * domain struct.
 */
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
						const char *name, void *data)
{
	struct irqchip_fwid *fwid;
	char *n;

	fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);

	/* Build the name first; both allocations are checked together below. */
	switch (type) {
	case IRQCHIP_FWNODE_NAMED:
		n = kasprintf(GFP_KERNEL, "%s", name);
		break;
	case IRQCHIP_FWNODE_NAMED_ID:
		n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
		break;
	default:
		n = kasprintf(GFP_KERNEL, "irqchip@%p", data);
		break;
	}

	if (!fwid || !n) {
		/* kfree(NULL) is a no-op, so partial failure is safe. */
		kfree(fwid);
		kfree(n);
		return NULL;
	}

	fwid->type = type;
	fwid->name = n;
	fwid->data = data;
	fwid->fwnode.ops = &irqchip_fwnode_ops;
	return &fwid->fwnode;
}
EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
/**
 * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
 * @fwnode: fwnode to free; must have been allocated by
 *          irq_domain_alloc_fwnode()
 *
 * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
 */
void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
{
	struct irqchip_fwid *fwid;

	/* Refuse to free fwnodes we did not allocate. */
	if (WARN_ON(!is_fwnode_irqchip(fwnode)))
		return;

	fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
	kfree(fwid->name);
	kfree(fwid);
}
EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @fwnode: firmware node for the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct device_node *of_node = to_of_node(fwnode);
	struct irqchip_fwid *fwid;
	struct irq_domain *domain;

	/* Counter for naming domains whose fwnode yields no usable name. */
	static atomic_t unknown_domains;

	/* The linear revmap is allocated as a tail array of the domain. */
	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	/* Derive the (debugfs-visible) domain name from the fwnode type. */
	if (fwnode && is_fwnode_irqchip(fwnode)) {
		fwid = container_of(fwnode, struct irqchip_fwid, fwnode);

		switch (fwid->type) {
		case IRQCHIP_FWNODE_NAMED:
		case IRQCHIP_FWNODE_NAMED_ID:
			/*
			 * Named fwnodes only transport a name; the node
			 * itself is not stored in the domain.
			 */
			domain->name = kstrdup(fwid->name, GFP_KERNEL);
			if (!domain->name) {
				kfree(domain);
				return NULL;
			}
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
			break;
		default:
			/* Name is borrowed from the fwid; do not mark it allocated. */
			domain->fwnode = fwnode;
			domain->name = fwid->name;
			break;
		}
#ifdef CONFIG_ACPI
	} else if (is_acpi_device_node(fwnode)) {
		struct acpi_buffer buf = {
			.length = ACPI_ALLOCATE_BUFFER,
		};
		acpi_handle handle;

		handle = acpi_device_handle(to_acpi_device_node(fwnode));
		if (acpi_get_name(handle, ACPI_FULL_PATHNAME, &buf) == AE_OK) {
			domain->name = buf.pointer;
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
		}

		domain->fwnode = fwnode;
#endif
	} else if (of_node) {
		char *name;

		/*
		 * DT paths contain '/', which debugfs is legitimately
		 * unhappy about. Replace them with ':', which does
		 * the trick and is not as offensive as '\'...
		 */
		name = kstrdup(of_node_full_name(of_node), GFP_KERNEL);
		if (!name) {
			kfree(domain);
			return NULL;
		}

		strreplace(name, '/', ':');

		domain->name = name;
		domain->fwnode = fwnode;
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	/* Last resort: synthesize a unique "unknown-N" name. */
	if (!domain->name) {
		if (fwnode)
			pr_err("Invalid fwnode type for irqdomain\n");
		domain->name = kasprintf(GFP_KERNEL, "unknown-%d",
					 atomic_inc_return(&unknown_domains));
		if (!domain->name) {
			kfree(domain);
			return NULL;
		}
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	/* Hold a reference on the OF node; dropped in irq_domain_remove(). */
	of_node_get(of_node);

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	mutex_init(&domain->revmap_tree_mutex);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;
	irq_domain_check_hierarchy(domain);

	mutex_lock(&irq_domain_mutex);
	debugfs_add_domain_dir(domain);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);
/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	debugfs_remove_domain_dir(domain);

	/* The caller should already have disposed of all mappings. */
	WARN_ON(!radix_tree_empty(&domain->revmap_tree));

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	/* Drop the OF node reference taken in __irq_domain_add(). */
	of_node_put(irq_domain_get_of_node(domain));

	/* The name is only owned by the domain when this flag is set. */
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
/**
 * irq_domain_update_bus_token() - Update a domain's bus token
 * @domain: domain to update
 * @bus_token: new bus token
 *
 * The domain is renamed to "<name>-<bus_token>" so domains sharing a
 * fwnode but differing in bus token keep unique (debugfs) names.
 */
void irq_domain_update_bus_token(struct irq_domain *domain,
				 enum irq_domain_bus_token bus_token)
{
	char *name;

	if (domain->bus_token == bus_token)
		return;

	mutex_lock(&irq_domain_mutex);

	domain->bus_token = bus_token;

	name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
	if (!name) {
		/* On OOM the token is updated but the rename is skipped. */
		mutex_unlock(&irq_domain_mutex);
		return;
	}

	debugfs_remove_domain_dir(domain);

	/* Free the old name only if the domain owned it. */
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	else
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;

	domain->name = name;
	debugfs_add_domain_dir(domain);

	mutex_unlock(&irq_domain_mutex);
}
/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *             pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *             pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain, and optionally if first_irq is positive then also
 * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
	if (!domain)
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(of_node));
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		/* Statically map hwirqs [0, size) to virqs [first_irq, ...). */
		irq_domain_associate_many(domain, first_irq, 0, size);
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	/* Linear map must cover [0, first_hwirq + size) for the revmap. */
	domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
				  first_hwirq + size, 0, ops, host_data);
	if (domain)
		irq_domain_associate_many(domain, first_irq, first_hwirq, size);

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
/**
 * irq_find_matching_fwspec() - Locates a domain for a given fwspec
 * @fwspec: FW specifier for an interrupt
 * @bus_token: domain-specific data
 *
 * Returns the first registered domain that matches, or NULL.
 */
struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
					    enum irq_domain_bus_token bus_token)
{
	struct irq_domain *h, *found = NULL;
	struct fwnode_handle *fwnode = fwspec->fwnode;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 *
	 * bus_token == DOMAIN_BUS_ANY matches any domain, any other
	 * values must generate an exact match for the domain to be
	 * selected.
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		/* Preference order: select() > match() > plain fwnode compare. */
		if (h->ops->select && fwspec->param_count)
			rc = h->ops->select(h, fwspec, bus_token);
		else if (h->ops->match)
			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
		else
			rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
			      ((bus_token == DOMAIN_BUS_ANY) ||
			       (h->bus_token == bus_token)));

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
  361. /**
  362. * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
  363. * IRQ remapping
  364. *
  365. * Return: false if any MSI irq domain does not support IRQ remapping,
  366. * true otherwise (including if there is no MSI irq domain)
  367. */
  368. bool irq_domain_check_msi_remap(void)
  369. {
  370. struct irq_domain *h;
  371. bool ret = true;
  372. mutex_lock(&irq_domain_mutex);
  373. list_for_each_entry(h, &irq_domain_list, link) {
  374. if (irq_domain_is_msi(h) &&
  375. !irq_domain_hierarchical_is_msi_remap(h)) {
  376. ret = false;
  377. break;
  378. }
  379. }
  380. mutex_unlock(&irq_domain_mutex);
  381. return ret;
  382. }
  383. EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);
  399. static void irq_domain_clear_mapping(struct irq_domain *domain,
  400. irq_hw_number_t hwirq)
  401. {
  402. if (hwirq < domain->revmap_size) {
  403. domain->linear_revmap[hwirq] = 0;
  404. } else {
  405. mutex_lock(&domain->revmap_tree_mutex);
  406. radix_tree_delete(&domain->revmap_tree, hwirq);
  407. mutex_unlock(&domain->revmap_tree_mutex);
  408. }
  409. }
  410. static void irq_domain_set_mapping(struct irq_domain *domain,
  411. irq_hw_number_t hwirq,
  412. struct irq_data *irq_data)
  413. {
  414. if (hwirq < domain->revmap_size) {
  415. domain->linear_revmap[hwirq] = irq_data->irq;
  416. } else {
  417. mutex_lock(&domain->revmap_tree_mutex);
  418. radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
  419. mutex_unlock(&domain->revmap_tree_mutex);
  420. }
  421. }
/* Break the association between a Linux irq and its hwirq in @domain. */
void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	hwirq = irq_data->hwirq;
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);

	/*
	 * Full barrier before tearing down the irq_data state.
	 * NOTE(review): the pairing read side is not visible in this file —
	 * confirm against irq_find_mapping()/revmap readers before changing.
	 */
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;
	domain->mapcount--;

	/* Clear reverse map for this hwirq */
	irq_domain_clear_mapping(domain, hwirq);
}
/*
 * Associate Linux irq @virq with hardware irq @hwirq in @domain.
 * Returns 0 on success or a negative errno (invalid arguments, or the
 * domain's map() callback rejected the mapping).
 */
int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
					domain->name, hwirq, virq, ret);
			}
			/* Roll back the partial association. */
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	domain->mapcount++;
	irq_domain_set_mapping(domain, hwirq, irq_data);
	mutex_unlock(&irq_domain_mutex);

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);
  488. void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
  489. irq_hw_number_t hwirq_base, int count)
  490. {
  491. struct device_node *of_node;
  492. int i;
  493. of_node = irq_domain_get_of_node(domain);
  494. pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
  495. of_node_full_name(of_node), irq_base, (int)hwirq_base, count);
  496. for (i = 0; i < count; i++) {
  497. irq_domain_associate(domain, irq_base + i, hwirq_base + i);
  498. }
  499. }
  500. EXPORT_SYMBOL_GPL(irq_domain_associate_many);
/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	struct device_node *of_node;
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;
	/* NOTE(review): no NULL check here — assumes a default domain is set
	 * when callers pass NULL; verify against call sites. */

	of_node = irq_domain_get_of_node(domain);
	/* virq 0 is reserved as "no irq"; allocate from 1 upwards. */
	virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_direct_max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
		       domain->revmap_direct_max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	/* Direct mapping: hwirq == virq. */
	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	struct device_node *of_node;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	of_node = irq_domain_get_of_node(domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		 hwirq, of_node_full_name(of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
  583. /**
  584. * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
  585. * @domain: domain owning the interrupt range
  586. * @irq_base: beginning of linux IRQ range
  587. * @hwirq_base: beginning of hardware IRQ range
  588. * @count: Number of interrupts to map
  589. *
  590. * This routine is used for allocating and mapping a range of hardware
  591. * irqs to linux irqs where the linux irq numbers are at pre-defined
  592. * locations. For use by controllers that already have static mappings
  593. * to insert in to the domain.
  594. *
  595. * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
  596. * domain insertion.
  597. *
  598. * 0 is returned upon success, while any failure to establish a static
  599. * mapping is treated as an error.
  600. */
  601. int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
  602. irq_hw_number_t hwirq_base, int count)
  603. {
  604. struct device_node *of_node;
  605. int ret;
  606. of_node = irq_domain_get_of_node(domain);
  607. ret = irq_alloc_descs(irq_base, irq_base, count,
  608. of_node_to_nid(of_node));
  609. if (unlikely(ret < 0))
  610. return ret;
  611. irq_domain_associate_many(domain, irq_base, hwirq_base, count);
  612. return 0;
  613. }
  614. EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
/*
 * Translate a firmware interrupt specifier into (hwirq, type) using the
 * domain's translate() callback (hierarchy) or legacy xlate() callback.
 */
static int irq_domain_translate(struct irq_domain *d,
				struct irq_fwspec *fwspec,
				irq_hw_number_t *hwirq, unsigned int *type)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	if (d->ops->translate)
		return d->ops->translate(d, fwspec, hwirq, type);
#endif
	if (d->ops->xlate)
		return d->ops->xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);

	/* If domain has no translation, then we assume interrupt line */
	/* Note: *type is deliberately left untouched on this path. */
	*hwirq = fwspec->param[0];
	return 0;
}
  631. static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
  632. struct irq_fwspec *fwspec)
  633. {
  634. int i;
  635. fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
  636. fwspec->param_count = irq_data->args_count;
  637. for (i = 0; i < irq_data->args_count; i++)
  638. fwspec->param[i] = irq_data->args[i];
  639. }
/*
 * Create (or look up) the Linux irq for a firmware interrupt specifier.
 * Returns the virq number, or 0 on failure (no domain, translation error,
 * trigger-type conflict, or allocation failure).
 */
unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
	struct irq_domain *domain;
	struct irq_data *irq_data;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int virq;

	/* Prefer a wired-bus match, then any bus; no fwnode means default domain. */
	if (fwspec->fwnode) {
		domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
		if (!domain)
			domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
	} else {
		domain = irq_default_domain;
	}

	if (!domain) {
		pr_warn("no irq domain found for %s !\n",
			of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_translate(domain, fwspec, &hwirq, &type))
		return 0;

	/*
	 * WARN if the irqchip returns a type with bits
	 * outside the sense mask set and clear these bits.
	 */
	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
		type &= IRQ_TYPE_SENSE_MASK;

	/*
	 * If we've already configured this interrupt,
	 * don't do it again, or hell will break loose.
	 */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		/*
		 * If the trigger type is not specified or matches the
		 * current trigger type then we are done so return the
		 * interrupt number.
		 */
		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
			return virq;

		/*
		 * If the trigger type has not been set yet, then set
		 * it now and return the interrupt number.
		 */
		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
			irq_data = irq_get_irq_data(virq);
			if (!irq_data)
				return 0;

			irqd_set_trigger_type(irq_data, type);
			return virq;
		}

		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_is_hierarchy(domain)) {
		/* Hierarchy domains allocate through the full domain stack. */
		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
		if (virq <= 0)
			return 0;
	} else {
		/* Create mapping */
		virq = irq_create_mapping(domain, hwirq);
		if (!virq)
			return virq;
	}

	irq_data = irq_get_irq_data(virq);
	if (!irq_data) {
		/* Undo the allocation via whichever path created it. */
		if (irq_domain_is_hierarchy(domain))
			irq_domain_free_irqs(virq, 1);
		else
			irq_dispose_mapping(virq);
		return 0;
	}

	/* Store trigger type */
	irqd_set_trigger_type(irq_data, type);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
  718. unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
  719. {
  720. struct irq_fwspec fwspec;
  721. of_phandle_args_to_fwspec(irq_data, &fwspec);
  722. return irq_create_fwspec_mapping(&fwspec);
  723. }
  724. EXPORT_SYMBOL_GPL(irq_create_of_mapping);
  725. /**
  726. * irq_dispose_mapping() - Unmap an interrupt
  727. * @virq: linux irq number of the interrupt to unmap
  728. */
  729. void irq_dispose_mapping(unsigned int virq)
  730. {
  731. struct irq_data *irq_data = irq_get_irq_data(virq);
  732. struct irq_domain *domain;
  733. if (!virq || !irq_data)
  734. return;
  735. domain = irq_data->domain;
  736. if (WARN_ON(domain == NULL))
  737. return;
  738. if (irq_domain_is_hierarchy(domain)) {
  739. irq_domain_free_irqs(virq, 1);
  740. } else {
  741. irq_domain_disassociate(domain, virq);
  742. irq_free_desc(virq);
  743. }
  744. }
  745. EXPORT_SYMBOL_GPL(irq_dispose_mapping);
/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * Returns the linux irq number, or 0 when no mapping exists.
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	/* Direct-mapped range: virq equals hwirq when the mapping exists */
	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_domain_get_irq_data(domain, hwirq);
		if (data && data->hwirq == hwirq)
			return hwirq;
	}

	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		return domain->linear_revmap[hwirq];

	/* Otherwise fall back to the radix tree, protected by RCU */
	rcu_read_lock();
	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
	rcu_read_unlock();
	return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	/* Need at least the single hwirq cell */
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	/* One-cell bindings carry no trigger information */
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	/* Need both the hwirq cell and the flags cell */
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	*out_hwirq = intspec[0];
	/* Only the sense/trigger bits of the flags cell are used */
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
  809. /**
  810. * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
  811. *
  812. * Device Tree IRQ specifier translation function which works with either one
  813. * or two cell bindings where the cell values map directly to the hwirq number
  814. * and linux irq flags.
  815. *
  816. * Note: don't use this function unless your interrupt controller explicitly
  817. * supports both one and two cell bindings. For the majority of controllers
  818. * the _onecell() or _twocell() variants above should be used.
  819. */
  820. int irq_domain_xlate_onetwocell(struct irq_domain *d,
  821. struct device_node *ctrlr,
  822. const u32 *intspec, unsigned int intsize,
  823. unsigned long *out_hwirq, unsigned int *out_type)
  824. {
  825. if (WARN_ON(intsize < 1))
  826. return -EINVAL;
  827. *out_hwirq = intspec[0];
  828. if (intsize > 1)
  829. *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
  830. else
  831. *out_type = IRQ_TYPE_NONE;
  832. return 0;
  833. }
  834. EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
/*
 * Convenience ops for simple domains: a one-or-two-cell DT translator
 * and no other callbacks.
 */
const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
  839. int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
  840. int node, const struct cpumask *affinity)
  841. {
  842. unsigned int hint;
  843. if (virq >= 0) {
  844. virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
  845. affinity);
  846. } else {
  847. hint = hwirq % nr_irqs;
  848. if (hint == 0)
  849. hint++;
  850. virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
  851. affinity);
  852. if (virq <= 0 && hint > 1) {
  853. virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
  854. affinity);
  855. }
  856. }
  857. return virq;
  858. }
  859. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  860. /**
  861. * irq_domain_create_hierarchy - Add a irqdomain into the hierarchy
  862. * @parent: Parent irq domain to associate with the new domain
  863. * @flags: Irq domain flags associated to the domain
  864. * @size: Size of the domain. See below
  865. * @fwnode: Optional fwnode of the interrupt controller
  866. * @ops: Pointer to the interrupt domain callbacks
  867. * @host_data: Controller private data pointer
  868. *
  869. * If @size is 0 a tree domain is created, otherwise a linear domain.
  870. *
  871. * If successful the parent is associated to the new domain and the
  872. * domain flags are set.
  873. * Returns pointer to IRQ domain, or NULL on failure.
  874. */
  875. struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
  876. unsigned int flags,
  877. unsigned int size,
  878. struct fwnode_handle *fwnode,
  879. const struct irq_domain_ops *ops,
  880. void *host_data)
  881. {
  882. struct irq_domain *domain;
  883. if (size)
  884. domain = irq_domain_create_linear(fwnode, size, ops, host_data);
  885. else
  886. domain = irq_domain_create_tree(fwnode, ops, host_data);
  887. if (domain) {
  888. domain->parent = parent;
  889. domain->flags |= flags;
  890. }
  891. return domain;
  892. }
  893. EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);
/* Publish @virq in the reverse map of every domain in its hierarchy. */
static void irq_domain_insert_irq(int virq)
{
	struct irq_data *data;

	/* Walk from the outermost irq_data up through all parents */
	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;

		domain->mapcount++;
		irq_domain_set_mapping(domain, data->hwirq, data);

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && data->chip)
			domain->name = data->chip->name;
	}

	/* Mapping is complete: the irq may now be requested */
	irq_clear_status_flags(virq, IRQ_NOREQUEST);
}
/* Remove @virq from the reverse map of every domain in its hierarchy. */
static void irq_domain_remove_irq(int virq)
{
	struct irq_data *data;

	/* Block new requests before the mapping goes away */
	irq_set_status_flags(virq, IRQ_NOREQUEST);
	irq_set_chip_and_handler(virq, NULL, NULL);
	/* Wait for in-flight handlers, then order against the revmap walk */
	synchronize_irq(virq);
	smp_mb();

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		domain->mapcount--;
		irq_domain_clear_mapping(domain, hwirq);
	}
}
  921. static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
  922. struct irq_data *child)
  923. {
  924. struct irq_data *irq_data;
  925. irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
  926. irq_data_get_node(child));
  927. if (irq_data) {
  928. child->parent_data = irq_data;
  929. irq_data->irq = child->irq;
  930. irq_data->common = child->common;
  931. irq_data->domain = domain;
  932. }
  933. return irq_data;
  934. }
/* Free the parent irq_data chains of @nr_irqs descriptors starting at @virq. */
static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data, *tmp;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		tmp = irq_data->parent_data;
		/* The outermost irq_data lives in irq_desc: unlink, don't free */
		irq_data->parent_data = NULL;
		irq_data->domain = NULL;

		/* Free the separately allocated parent chain */
		while (tmp) {
			irq_data = tmp;
			tmp = tmp->parent_data;
			kfree(irq_data);
		}
	}
}
/*
 * Build the irq_data chain for @nr_irqs descriptors starting at @virq:
 * one irq_data per parent domain above @domain.
 * Returns 0 on success, -ENOMEM on allocation failure (fully unwound).
 */
static int irq_domain_alloc_irq_data(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct irq_domain *parent;
	int i;

	/* The outermost irq_data is embedded in struct irq_desc */
	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		irq_data->domain = domain;

		for (parent = domain->parent; parent; parent = parent->parent) {
			irq_data = irq_domain_insert_irq_data(parent, irq_data);
			if (!irq_data) {
				/* i + 1: also unwind the partially built chain */
				irq_domain_free_irq_data(virq, i + 1);
				return -ENOMEM;
			}
		}
	}

	return 0;
}
  971. /**
  972. * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
  973. * @domain: domain to match
  974. * @virq: IRQ number to get irq_data
  975. */
  976. struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
  977. unsigned int virq)
  978. {
  979. struct irq_data *irq_data;
  980. for (irq_data = irq_get_irq_data(virq); irq_data;
  981. irq_data = irq_data->parent_data)
  982. if (irq_data->domain == domain)
  983. return irq_data;
  984. return NULL;
  985. }
  986. EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
  987. /**
  988. * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
  989. * @domain: Interrupt domain to match
  990. * @virq: IRQ number
  991. * @hwirq: The hwirq number
  992. * @chip: The associated interrupt chip
  993. * @chip_data: The associated chip data
  994. */
  995. int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
  996. irq_hw_number_t hwirq, struct irq_chip *chip,
  997. void *chip_data)
  998. {
  999. struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
  1000. if (!irq_data)
  1001. return -ENOENT;
  1002. irq_data->hwirq = hwirq;
  1003. irq_data->chip = chip ? chip : &no_irq_chip;
  1004. irq_data->chip_data = chip_data;
  1005. return 0;
  1006. }
  1007. EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hardware interrupt number
 * @chip: The associated interrupt chip
 * @chip_data: The associated interrupt chip data
 * @handler: The interrupt flow handler
 * @handler_data: The interrupt flow handler data
 * @handler_name: The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
	__irq_set_handler(virq, handler, 0, handler_name);
	irq_set_handler_data(virq, handler_data);
}
EXPORT_SYMBOL(irq_domain_set_info);
/**
 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
 * @irq_data: The pointer to irq_data
 */
void irq_domain_reset_irq_data(struct irq_data *irq_data)
{
	irq_data->hwirq = 0;
	/* Reset to the dummy chip rather than leaving a NULL pointer */
	irq_data->chip = &no_irq_chip;
	irq_data->chip_data = NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
  1040. /**
  1041. * irq_domain_free_irqs_common - Clear irq_data and free the parent
  1042. * @domain: Interrupt domain to match
  1043. * @virq: IRQ number to start with
  1044. * @nr_irqs: The number of irqs to free
  1045. */
  1046. void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
  1047. unsigned int nr_irqs)
  1048. {
  1049. struct irq_data *irq_data;
  1050. int i;
  1051. for (i = 0; i < nr_irqs; i++) {
  1052. irq_data = irq_domain_get_irq_data(domain, virq + i);
  1053. if (irq_data)
  1054. irq_domain_reset_irq_data(irq_data);
  1055. }
  1056. irq_domain_free_irqs_parent(domain, virq, nr_irqs);
  1057. }
  1058. EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);
  1059. /**
  1060. * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
  1061. * @domain: Interrupt domain to match
  1062. * @virq: IRQ number to start with
  1063. * @nr_irqs: The number of irqs to free
  1064. */
  1065. void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
  1066. unsigned int nr_irqs)
  1067. {
  1068. int i;
  1069. for (i = 0; i < nr_irqs; i++) {
  1070. irq_set_handler_data(virq + i, NULL);
  1071. irq_set_handler(virq + i, NULL);
  1072. }
  1073. irq_domain_free_irqs_common(domain, virq, nr_irqs);
  1074. }
/* Invoke the domain's free() callback, if it provides one. */
static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
					   unsigned int irq_base,
					   unsigned int nr_irqs)
{
	if (domain->ops->free)
		domain->ops->free(domain, irq_base, nr_irqs);
}
/*
 * Invoke the domain's alloc() callback. Callers check ops->alloc exists
 * before getting here (see __irq_domain_alloc_irqs()).
 */
int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
				    unsigned int irq_base,
				    unsigned int nr_irqs, void *arg)
{
	return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
}
/**
 * __irq_domain_alloc_irqs - Allocate IRQs from domain
 * @domain: domain to allocate from
 * @irq_base: allocate specified IRQ number if irq_base >= 0
 * @nr_irqs: number of IRQs to allocate
 * @node: NUMA node id for memory allocation
 * @arg: domain specific argument
 * @realloc: IRQ descriptors have already been allocated if true
 * @affinity: Optional irq affinity mask for multiqueue devices
 *
 * Allocate IRQ numbers and initialize all data structures to support
 * hierarchy IRQ domains.
 * Parameter @realloc is mainly to support legacy IRQs.
 * Returns error code or allocated IRQ number
 *
 * The whole process to setup an IRQ has been split into two steps.
 * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
 * descriptor and required hardware resources. The second step,
 * irq_domain_activate_irq(), is to program the hardware with preallocated
 * resources. In this way, it's easier to rollback when failing to
 * allocate resources.
 */
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
			    unsigned int nr_irqs, int node, void *arg,
			    bool realloc, const struct cpumask *affinity)
{
	int i, ret, virq;

	/* A NULL domain means "use the default domain", which must exist */
	if (domain == NULL) {
		domain = irq_default_domain;
		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
			return -EINVAL;
	}

	if (!domain->ops->alloc) {
		pr_debug("domain->ops->alloc() is NULL\n");
		return -ENOSYS;
	}

	if (realloc && irq_base >= 0) {
		/* Caller already owns the descriptors (legacy IRQ support) */
		virq = irq_base;
	} else {
		virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
					      affinity);
		if (virq < 0) {
			pr_debug("cannot allocate IRQ(base %d, count %d)\n",
				 irq_base, nr_irqs);
			return virq;
		}
	}

	/* Build the per-domain irq_data chain for each descriptor */
	if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
		pr_debug("cannot allocate memory for IRQ%d\n", virq);
		ret = -ENOMEM;
		goto out_free_desc;
	}

	/* Allocation and revmap insertion are atomic vs. other domain ops */
	mutex_lock(&irq_domain_mutex);
	ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
	if (ret < 0) {
		mutex_unlock(&irq_domain_mutex);
		goto out_free_irq_data;
	}
	for (i = 0; i < nr_irqs; i++)
		irq_domain_insert_irq(virq + i);
	mutex_unlock(&irq_domain_mutex);

	return virq;

out_free_irq_data:
	irq_domain_free_irq_data(virq, nr_irqs);
out_free_desc:
	irq_free_descs(virq, nr_irqs);
	return ret;
}
/* The irq_data was moved, fix the revmap to refer to the new location */
static void irq_domain_fix_revmap(struct irq_data *d)
{
	void __rcu **slot;

	if (d->hwirq < d->domain->revmap_size)
		return; /* Not using radix tree. */

	/* Fix up the revmap. */
	mutex_lock(&d->domain->revmap_tree_mutex);
	slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
	if (slot)
		radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
	mutex_unlock(&d->domain->revmap_tree_mutex);
}
/**
 * irq_domain_push_irq() - Push a domain in to the top of a hierarchy.
 * @domain: Domain to push.
 * @virq: Irq to push the domain in to.
 * @arg: Passed to the irq_domain_ops alloc() function.
 *
 * For an already existing irqdomain hierarchy, as might be obtained
 * via a call to pci_enable_msix(), add an additional domain to the
 * head of the processing chain. Must be called before request_irq()
 * has been called.
 *
 * Returns 0 on success, a negative errno on failure.
 */
int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
{
	struct irq_data *child_irq_data;
	struct irq_data *root_irq_data = irq_get_irq_data(virq);
	struct irq_desc *desc;
	int rv = 0;

	/*
	 * Check that no action has been set, which indicates the virq
	 * is in a state where this function doesn't have to deal with
	 * races between interrupt handling and maintaining the
	 * hierarchy. This will catch gross misuse. Attempting to
	 * make the check race free would require holding locks across
	 * calls to struct irq_domain_ops->alloc(), which could lead
	 * to deadlock, so we just do a simple check before starting.
	 */
	desc = irq_to_desc(virq);
	if (!desc)
		return -EINVAL;
	if (WARN_ON(desc->action))
		return -EBUSY;

	if (domain == NULL)
		return -EINVAL;

	if (WARN_ON(!irq_domain_is_hierarchy(domain)))
		return -EINVAL;

	if (!root_irq_data)
		return -EINVAL;

	/* The pushed domain must sit directly above the current top */
	if (domain->parent != root_irq_data->domain)
		return -EINVAL;

	child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
				      irq_data_get_node(root_irq_data));
	if (!child_irq_data)
		return -ENOMEM;

	mutex_lock(&irq_domain_mutex);

	/* Copy the original irq_data. */
	*child_irq_data = *root_irq_data;

	/*
	 * Overwrite the root_irq_data, which is embedded in struct
	 * irq_desc, with values for this domain.
	 */
	root_irq_data->parent_data = child_irq_data;
	root_irq_data->domain = domain;
	root_irq_data->mask = 0;
	root_irq_data->hwirq = 0;
	root_irq_data->chip = NULL;
	root_irq_data->chip_data = NULL;

	/* May (probably does) set hwirq, chip, etc. */
	rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
	if (rv) {
		/* Restore the original irq_data. */
		*root_irq_data = *child_irq_data;
		goto error;
	}

	/* The old data moved into the kzalloc'ed copy: repoint its revmap */
	irq_domain_fix_revmap(child_irq_data);
	irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);

error:
	mutex_unlock(&irq_domain_mutex);

	return rv;
}
EXPORT_SYMBOL_GPL(irq_domain_push_irq);
/**
 * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
 * @domain: Domain to remove.
 * @virq: Irq to remove the domain from.
 *
 * Undo the effects of a call to irq_domain_push_irq(). Must be
 * called either before request_irq() or after free_irq().
 *
 * Returns 0 on success, a negative errno on failure.
 */
int irq_domain_pop_irq(struct irq_domain *domain, int virq)
{
	struct irq_data *root_irq_data = irq_get_irq_data(virq);
	struct irq_data *child_irq_data;
	struct irq_data *tmp_irq_data;
	struct irq_desc *desc;

	/*
	 * Check that no action is set, which indicates the virq is in
	 * a state where this function doesn't have to deal with races
	 * between interrupt handling and maintaining the hierarchy.
	 * This will catch gross misuse. Attempting to make the check
	 * race free would require holding locks across calls to
	 * struct irq_domain_ops->free(), which could lead to
	 * deadlock, so we just do a simple check before starting.
	 */
	desc = irq_to_desc(virq);
	if (!desc)
		return -EINVAL;
	if (WARN_ON(desc->action))
		return -EBUSY;

	if (domain == NULL)
		return -EINVAL;

	if (!root_irq_data)
		return -EINVAL;

	tmp_irq_data = irq_domain_get_irq_data(domain, virq);

	/* We can only "pop" if this domain is at the top of the list */
	if (WARN_ON(root_irq_data != tmp_irq_data))
		return -EINVAL;

	if (WARN_ON(root_irq_data->domain != domain))
		return -EINVAL;

	child_irq_data = root_irq_data->parent_data;
	if (WARN_ON(!child_irq_data))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);

	root_irq_data->parent_data = NULL;

	irq_domain_clear_mapping(domain, root_irq_data->hwirq);
	irq_domain_free_irqs_hierarchy(domain, virq, 1);

	/* Restore the original irq_data. */
	*root_irq_data = *child_irq_data;

	/* The data moved back into irq_desc: repoint its revmap entry */
	irq_domain_fix_revmap(root_irq_data);

	mutex_unlock(&irq_domain_mutex);

	kfree(child_irq_data);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
/**
 * irq_domain_free_irqs - Free IRQ number and associated data structures
 * @virq: base IRQ number
 * @nr_irqs: number of IRQs to free
 */
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_get_irq_data(virq);
	int i;

	if (WARN(!data || !data->domain || !data->domain->ops->free,
		 "NULL pointer, cannot free irq\n"))
		return;

	/* Unpublish the mappings before releasing hardware resources */
	mutex_lock(&irq_domain_mutex);
	for (i = 0; i < nr_irqs; i++)
		irq_domain_remove_irq(virq + i);
	irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
	mutex_unlock(&irq_domain_mutex);

	irq_domain_free_irq_data(virq, nr_irqs);
	irq_free_descs(virq, nr_irqs);
}
  1312. /**
  1313. * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
  1314. * @irq_base: Base IRQ number
  1315. * @nr_irqs: Number of IRQs to allocate
  1316. * @arg: Allocation data (arch/domain specific)
  1317. *
  1318. * Check whether the domain has been setup recursive. If not allocate
  1319. * through the parent domain.
  1320. */
  1321. int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
  1322. unsigned int irq_base, unsigned int nr_irqs,
  1323. void *arg)
  1324. {
  1325. if (!domain->parent)
  1326. return -ENOSYS;
  1327. return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
  1328. nr_irqs, arg);
  1329. }
  1330. EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
  1331. /**
  1332. * irq_domain_free_irqs_parent - Free interrupts from parent domain
  1333. * @irq_base: Base IRQ number
  1334. * @nr_irqs: Number of IRQs to free
  1335. *
  1336. * Check whether the domain has been setup recursive. If not free
  1337. * through the parent domain.
  1338. */
  1339. void irq_domain_free_irqs_parent(struct irq_domain *domain,
  1340. unsigned int irq_base, unsigned int nr_irqs)
  1341. {
  1342. if (!domain->parent)
  1343. return;
  1344. irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
  1345. }
  1346. EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
  1347. static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
  1348. {
  1349. if (irq_data && irq_data->domain) {
  1350. struct irq_domain *domain = irq_data->domain;
  1351. if (domain->ops->deactivate)
  1352. domain->ops->deactivate(domain, irq_data);
  1353. if (irq_data->parent_data)
  1354. __irq_domain_deactivate_irq(irq_data->parent_data);
  1355. }
  1356. }
/*
 * Activate bottom-up: the parent chain first, then this domain.
 * On failure of this domain's activate(), the already activated
 * parents are rolled back.
 */
static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
{
	int ret = 0;

	if (irqd && irqd->domain) {
		struct irq_domain *domain = irqd->domain;

		if (irqd->parent_data)
			ret = __irq_domain_activate_irq(irqd->parent_data,
							reserve);
		if (!ret && domain->ops->activate) {
			ret = domain->ops->activate(domain, irqd, reserve);
			/* Rollback in case of error */
			if (ret && irqd->parent_data)
				__irq_domain_deactivate_irq(irqd->parent_data);
		}
	}

	return ret;
}
/**
 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
 * interrupt
 * @irq_data: Outermost irq_data associated with interrupt
 * @reserve: If set only reserve an interrupt vector instead of assigning one
 *
 * This is the second step to call domain_ops->activate to program interrupt
 * controllers, so the interrupt could actually get delivered.
 */
int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
{
	int ret = 0;

	/* Already-activated interrupts are left untouched (idempotent) */
	if (!irqd_is_activated(irq_data))
		ret = __irq_domain_activate_irq(irq_data, reserve);
	if (!ret)
		irqd_set_activated(irq_data);
	return ret;
}
  1392. /**
  1393. * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
  1394. * deactivate interrupt
  1395. * @irq_data: outermost irq_data associated with interrupt
  1396. *
  1397. * It calls domain_ops->deactivate to program interrupt controllers to disable
  1398. * interrupt delivery.
  1399. */
  1400. void irq_domain_deactivate_irq(struct irq_data *irq_data)
  1401. {
  1402. if (irqd_is_activated(irq_data)) {
  1403. __irq_domain_deactivate_irq(irq_data);
  1404. irqd_clr_activated(irq_data);
  1405. }
  1406. }
/* Flag domains providing an alloc() callback as hierarchy capable. */
static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
	/* Hierarchy irq_domains must implement callback alloc() */
	if (domain->ops->alloc)
		domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
}
  1413. /**
  1414. * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
  1415. * parent has MSI remapping support
  1416. * @domain: domain pointer
  1417. */
  1418. bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
  1419. {
  1420. for (; domain; domain = domain->parent) {
  1421. if (irq_domain_is_msi_remap(domain))
  1422. return true;
  1423. }
  1424. return false;
  1425. }
  1426. #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
  1427. /**
  1428. * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
  1429. * @domain: domain to match
  1430. * @virq: IRQ number to get irq_data
  1431. */
  1432. struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
  1433. unsigned int virq)
  1434. {
  1435. struct irq_data *irq_data = irq_get_irq_data(virq);
  1436. return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
  1437. }
  1438. EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain: Interrupt domain to match (unused without hierarchy support)
 * @virq: IRQ number
 * @hwirq: The hardware interrupt number (unused without hierarchy support)
 * @chip: The associated interrupt chip
 * @chip_data: The associated interrupt chip data
 * @handler: The interrupt flow handler
 * @handler_data: The interrupt flow handler data
 * @handler_name: The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
	irq_set_chip_data(virq, chip_data);
	irq_set_handler_data(virq, handler_data);
}
/* No hierarchy support configured: nothing to detect or flag. */
static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
}
  1462. #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
  1463. #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
  1464. static struct dentry *domain_dir;
  1465. static void
  1466. irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
  1467. {
  1468. seq_printf(m, "%*sname: %s\n", ind, "", d->name);
  1469. seq_printf(m, "%*ssize: %u\n", ind + 1, "",
  1470. d->revmap_size + d->revmap_direct_max_irq);
  1471. seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
  1472. seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags);
  1473. if (d->ops && d->ops->debug_show)
  1474. d->ops->debug_show(m, d, NULL, ind + 1);
  1475. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  1476. if (!d->parent)
  1477. return;
  1478. seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
  1479. irq_domain_debug_show_one(m, d->parent, ind + 4);
  1480. #endif
  1481. }
/* debugfs show: dump m->private, or the default domain for the "default" file. */
static int irq_domain_debug_show(struct seq_file *m, void *p)
{
	struct irq_domain *d = m->private;

	/* Default domain? Might be NULL */
	if (!d) {
		if (!irq_default_domain)
			return 0;
		d = irq_default_domain;
	}
	irq_domain_debug_show_one(m, d, 0);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(irq_domain_debug);
  1495. static void debugfs_add_domain_dir(struct irq_domain *d)
  1496. {
  1497. if (!d->name || !domain_dir || d->debugfs_file)
  1498. return;
  1499. d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
  1500. &irq_domain_debug_fops);
  1501. }
/* Remove the per-domain debugfs file created by debugfs_add_domain_dir(). */
static void debugfs_remove_domain_dir(struct irq_domain *d)
{
	debugfs_remove(d->debugfs_file);
}
/* Create the "domains" debugfs directory and populate it with all domains. */
void __init irq_domain_debugfs_init(struct dentry *root)
{
	struct irq_domain *d;

	domain_dir = debugfs_create_dir("domains", root);
	if (!domain_dir)
		return;

	/* "default" entry: NULL private data selects irq_default_domain */
	debugfs_create_file("default", 0444, domain_dir, NULL,
			    &irq_domain_debug_fops);
	/* Add files for every domain registered so far */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(d, &irq_domain_list, link)
		debugfs_add_domain_dir(d);
	mutex_unlock(&irq_domain_mutex);
}
  1519. #endif