/*
 * irqdomain.c — irq domain infrastructure: translation between hardware
 * interrupt numbers (hwirq) and Linux virtual irq numbers (virq).
 */
  1. #define pr_fmt(fmt) "irq: " fmt
  2. #include <linux/acpi.h>
  3. #include <linux/debugfs.h>
  4. #include <linux/hardirq.h>
  5. #include <linux/interrupt.h>
  6. #include <linux/irq.h>
  7. #include <linux/irqdesc.h>
  8. #include <linux/irqdomain.h>
  9. #include <linux/module.h>
  10. #include <linux/mutex.h>
  11. #include <linux/of.h>
  12. #include <linux/of_address.h>
  13. #include <linux/of_irq.h>
  14. #include <linux/topology.h>
  15. #include <linux/seq_file.h>
  16. #include <linux/slab.h>
  17. #include <linux/smp.h>
  18. #include <linux/fs.h>
/* All registered irq domains; list and default pointer guarded by irq_domain_mutex. */
static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

/* Serializes modifications of every domain's radix-tree reverse map. */
static DEFINE_MUTEX(revmap_trees_mutex);

/* Domain used when callers pass a NULL domain (see irq_set_default_host()). */
static struct irq_domain *irq_default_domain;

static void irq_domain_check_hierarchy(struct irq_domain *domain);

/*
 * Carrier for a software-constructed fwnode identifying an irq domain
 * that has no OF/ACPI firmware node (see __irq_domain_alloc_fwnode()).
 */
struct irqchip_fwid {
	struct fwnode_handle	fwnode;	/* embedded handle returned to callers */
	unsigned int		type;	/* IRQCHIP_FWNODE_* discriminator */
	char			*name;	/* kasprintf-allocated, freed with fwid */
	void			*data;	/* opaque user data */
};

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void debugfs_add_domain_dir(struct irq_domain *d);
static void debugfs_remove_domain_dir(struct irq_domain *d);
#else
/* No-op stubs when the irq debugfs interface is not configured. */
static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
#endif
  37. /**
  38. * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
  39. * identifying an irq domain
  40. * @type: Type of irqchip_fwnode. See linux/irqdomain.h
  41. * @name: Optional user provided domain name
  42. * @id: Optional user provided id if name != NULL
  43. * @data: Optional user-provided data
  44. *
  45. * Allocate a struct irqchip_fwid, and return a poiner to the embedded
  46. * fwnode_handle (or NULL on failure).
  47. *
  48. * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
  49. * solely to transport name information to irqdomain creation code. The
  50. * node is not stored. For other types the pointer is kept in the irq
  51. * domain struct.
  52. */
  53. struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
  54. const char *name, void *data)
  55. {
  56. struct irqchip_fwid *fwid;
  57. char *n;
  58. fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
  59. switch (type) {
  60. case IRQCHIP_FWNODE_NAMED:
  61. n = kasprintf(GFP_KERNEL, "%s", name);
  62. break;
  63. case IRQCHIP_FWNODE_NAMED_ID:
  64. n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
  65. break;
  66. default:
  67. n = kasprintf(GFP_KERNEL, "irqchip@%p", data);
  68. break;
  69. }
  70. if (!fwid || !n) {
  71. kfree(fwid);
  72. kfree(n);
  73. return NULL;
  74. }
  75. fwid->type = type;
  76. fwid->name = n;
  77. fwid->data = data;
  78. fwid->fwnode.type = FWNODE_IRQCHIP;
  79. return &fwid->fwnode;
  80. }
  81. EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
  82. /**
  83. * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
  84. *
  85. * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
  86. */
  87. void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
  88. {
  89. struct irqchip_fwid *fwid;
  90. if (WARN_ON(!is_fwnode_irqchip(fwnode)))
  91. return;
  92. fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
  93. kfree(fwid->name);
  94. kfree(fwid);
  95. }
  96. EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @fwnode: firmware node for the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct device_node *of_node = to_of_node(fwnode);
	struct irqchip_fwid *fwid;
	struct irq_domain *domain;

	/* Generates a unique fallback "unknown-N" suffix per unnamed domain. */
	static atomic_t unknown_domains;

	/* The linear revmap array is allocated inline, after the struct. */
	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	if (fwnode && is_fwnode_irqchip(fwnode)) {
		fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
		switch (fwid->type) {
		case IRQCHIP_FWNODE_NAMED:
		case IRQCHIP_FWNODE_NAMED_ID:
			/*
			 * Named fwnodes only transport the name; the node
			 * itself is not stored, so duplicate the string.
			 */
			domain->name = kstrdup(fwid->name, GFP_KERNEL);
			if (!domain->name) {
				kfree(domain);
				return NULL;
			}
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
			break;
		default:
			/* Borrow the fwid's name; NAME_ALLOCATED stays clear. */
			domain->fwnode = fwnode;
			domain->name = fwid->name;
			break;
		}
#ifdef CONFIG_ACPI
	} else if (is_acpi_device_node(fwnode)) {
		struct acpi_buffer buf = {
			.length = ACPI_ALLOCATE_BUFFER,
		};
		acpi_handle handle;

		handle = acpi_device_handle(to_acpi_device_node(fwnode));
		if (acpi_get_name(handle, ACPI_FULL_PATHNAME, &buf) == AE_OK) {
			/* buf.pointer was allocated by ACPI; the domain now owns it. */
			domain->name = buf.pointer;
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
		}
		domain->fwnode = fwnode;
#endif
	} else if (of_node) {
		char *name;

		/*
		 * DT paths contain '/', which debugfs is legitimately
		 * unhappy about. Replace them with ':', which does
		 * the trick and is not as offensive as '\'...
		 */
		name = kstrdup(of_node_full_name(of_node), GFP_KERNEL);
		if (!name) {
			kfree(domain);
			return NULL;
		}
		strreplace(name, '/', ':');
		domain->name = name;
		domain->fwnode = fwnode;
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	/* No usable firmware-derived name: fall back to "unknown-N". */
	if (!domain->name) {
		if (fwnode) {
			pr_err("Invalid fwnode type (%d) for irqdomain\n",
			       fwnode->type);
		}
		domain->name = kasprintf(GFP_KERNEL, "unknown-%d",
					 atomic_inc_return(&unknown_domains));
		if (!domain->name) {
			kfree(domain);
			return NULL;
		}
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	/* Reference dropped in irq_domain_remove(). */
	of_node_get(of_node);

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;
	irq_domain_check_hierarchy(domain);

	mutex_lock(&irq_domain_mutex);
	debugfs_add_domain_dir(domain);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);
/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	debugfs_remove_domain_dir(domain);

	/* All mappings must already be gone; the radix revmap must be empty. */
	WARN_ON(!radix_tree_empty(&domain->revmap_tree));

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	/* Drop the of_node reference taken by __irq_domain_add(). */
	of_node_put(irq_domain_get_of_node(domain));

	/* The name is only ours to free when NAME_ALLOCATED is set. */
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
/*
 * irq_domain_update_bus_token() - change a domain's bus token and rename it
 * @domain: domain to update
 * @bus_token: new bus token
 *
 * The domain is renamed to "<name>-<token>" and its debugfs directory is
 * recreated under the new name. NOTE: if the name allocation fails, the
 * function returns with the token already updated but the old name kept.
 */
void irq_domain_update_bus_token(struct irq_domain *domain,
				 enum irq_domain_bus_token bus_token)
{
	char *name;

	/* Nothing to do when the token is unchanged. */
	if (domain->bus_token == bus_token)
		return;

	mutex_lock(&irq_domain_mutex);

	domain->bus_token = bus_token;

	name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
	if (!name) {
		mutex_unlock(&irq_domain_mutex);
		return;
	}

	debugfs_remove_domain_dir(domain);

	/* Free the old name only if we owned it; we always own the new one. */
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	else
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;

	domain->name = name;
	debugfs_add_domain_dir(domain);

	mutex_unlock(&irq_domain_mutex);
}
  249. /**
  250. * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
  251. * @of_node: pointer to interrupt controller's device tree node.
  252. * @size: total number of irqs in mapping
  253. * @first_irq: first number of irq block assigned to the domain,
  254. * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
  255. * pre-map all of the irqs in the domain to virqs starting at first_irq.
  256. * @ops: domain callbacks
  257. * @host_data: Controller private data pointer
  258. *
  259. * Allocates an irq_domain, and optionally if first_irq is positive then also
  260. * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
  261. *
  262. * This is intended to implement the expected behaviour for most
  263. * interrupt controllers. If device tree is used, then first_irq will be 0 and
  264. * irqs get mapped dynamically on the fly. However, if the controller requires
  265. * static virq assignments (non-DT boot) then it will set that up correctly.
  266. */
  267. struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
  268. unsigned int size,
  269. unsigned int first_irq,
  270. const struct irq_domain_ops *ops,
  271. void *host_data)
  272. {
  273. struct irq_domain *domain;
  274. domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
  275. if (!domain)
  276. return NULL;
  277. if (first_irq > 0) {
  278. if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
  279. /* attempt to allocated irq_descs */
  280. int rc = irq_alloc_descs(first_irq, first_irq, size,
  281. of_node_to_nid(of_node));
  282. if (rc < 0)
  283. pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
  284. first_irq);
  285. }
  286. irq_domain_associate_many(domain, first_irq, 0, size);
  287. }
  288. return domain;
  289. }
  290. EXPORT_SYMBOL_GPL(irq_domain_add_simple);
  291. /**
  292. * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
  293. * @of_node: pointer to interrupt controller's device tree node.
  294. * @size: total number of irqs in legacy mapping
  295. * @first_irq: first number of irq block assigned to the domain
  296. * @first_hwirq: first hwirq number to use for the translation. Should normally
  297. * be '0', but a positive integer can be used if the effective
  298. * hwirqs numbering does not begin at zero.
  299. * @ops: map/unmap domain callbacks
  300. * @host_data: Controller private data pointer
  301. *
  302. * Note: the map() callback will be called before this function returns
  303. * for all legacy interrupts except 0 (which is always the invalid irq for
  304. * a legacy controller).
  305. */
  306. struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
  307. unsigned int size,
  308. unsigned int first_irq,
  309. irq_hw_number_t first_hwirq,
  310. const struct irq_domain_ops *ops,
  311. void *host_data)
  312. {
  313. struct irq_domain *domain;
  314. domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
  315. first_hwirq + size, 0, ops, host_data);
  316. if (domain)
  317. irq_domain_associate_many(domain, first_irq, first_hwirq, size);
  318. return domain;
  319. }
  320. EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
  321. /**
  322. * irq_find_matching_fwspec() - Locates a domain for a given fwspec
  323. * @fwspec: FW specifier for an interrupt
  324. * @bus_token: domain-specific data
  325. */
  326. struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
  327. enum irq_domain_bus_token bus_token)
  328. {
  329. struct irq_domain *h, *found = NULL;
  330. struct fwnode_handle *fwnode = fwspec->fwnode;
  331. int rc;
  332. /* We might want to match the legacy controller last since
  333. * it might potentially be set to match all interrupts in
  334. * the absence of a device node. This isn't a problem so far
  335. * yet though...
  336. *
  337. * bus_token == DOMAIN_BUS_ANY matches any domain, any other
  338. * values must generate an exact match for the domain to be
  339. * selected.
  340. */
  341. mutex_lock(&irq_domain_mutex);
  342. list_for_each_entry(h, &irq_domain_list, link) {
  343. if (h->ops->select && fwspec->param_count)
  344. rc = h->ops->select(h, fwspec, bus_token);
  345. else if (h->ops->match)
  346. rc = h->ops->match(h, to_of_node(fwnode), bus_token);
  347. else
  348. rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
  349. ((bus_token == DOMAIN_BUS_ANY) ||
  350. (h->bus_token == bus_token)));
  351. if (rc) {
  352. found = h;
  353. break;
  354. }
  355. }
  356. mutex_unlock(&irq_domain_mutex);
  357. return found;
  358. }
  359. EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
  360. /**
  361. * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
  362. * IRQ remapping
  363. *
  364. * Return: false if any MSI irq domain does not support IRQ remapping,
  365. * true otherwise (including if there is no MSI irq domain)
  366. */
  367. bool irq_domain_check_msi_remap(void)
  368. {
  369. struct irq_domain *h;
  370. bool ret = true;
  371. mutex_lock(&irq_domain_mutex);
  372. list_for_each_entry(h, &irq_domain_list, link) {
  373. if (irq_domain_is_msi(h) &&
  374. !irq_domain_hierarchical_is_msi_remap(h)) {
  375. ret = false;
  376. break;
  377. }
  378. }
  379. mutex_unlock(&irq_domain_mutex);
  380. return ret;
  381. }
  382. EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
  383. /**
  384. * irq_set_default_host() - Set a "default" irq domain
  385. * @domain: default domain pointer
  386. *
  387. * For convenience, it's possible to set a "default" domain that will be used
  388. * whenever NULL is passed to irq_create_mapping(). It makes life easier for
  389. * platforms that want to manipulate a few hard coded interrupt numbers that
  390. * aren't properly represented in the device-tree.
  391. */
  392. void irq_set_default_host(struct irq_domain *domain)
  393. {
  394. pr_debug("Default domain set to @0x%p\n", domain);
  395. irq_default_domain = domain;
  396. }
  397. EXPORT_SYMBOL_GPL(irq_set_default_host);
/*
 * irq_domain_disassociate() - break a virq <-> hwirq association
 * @domain: domain owning the mapping
 * @irq: linux irq number to disassociate
 *
 * Reverses irq_domain_associate(): masks off the descriptor, notifies the
 * controller via ->unmap, clears the irq_data fields and removes the
 * reverse-map entry.
 */
void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	/* Remember the hwirq before irq_data is cleared below. */
	hwirq = irq_data->hwirq;

	/* Block new request_irq() while tearing the mapping down. */
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);
	/* NOTE(review): presumably orders ->unmap against the clearing
	 * of irq_data for concurrent lookups — confirm before relying. */
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;
	domain->mapcount--;

	/* Clear reverse map for this hwirq */
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = 0;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&domain->revmap_tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
	}
}
/*
 * irq_domain_associate() - associate virq with hwirq in @domain
 * @domain: domain the mapping belongs to
 * @virq: allocated linux irq number
 * @hwirq: hardware irq number within the domain
 *
 * Validates the pair, calls the domain's ->map callback (if any) and
 * installs the reverse-map entry. Returns 0 on success or a negative
 * errno; -EPERM from ->map means the irq is firmware-protected and is
 * rolled back silently.
 */
int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
				       domain->name, hwirq, virq, ret);
			}
			/* Roll back the partially-set association. */
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	domain->mapcount++;
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = virq;
	} else {
		/* NOTE(review): radix_tree_insert() may fail (-ENOMEM) and
		 * the return value is not checked here — a failed insert
		 * leaves the hwirq unfindable via irq_find_mapping(). */
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
		mutex_unlock(&revmap_trees_mutex);
	}
	mutex_unlock(&irq_domain_mutex);

	/* The mapping is live: allow request_irq() on this virq. */
	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);
  476. void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
  477. irq_hw_number_t hwirq_base, int count)
  478. {
  479. struct device_node *of_node;
  480. int i;
  481. of_node = irq_domain_get_of_node(domain);
  482. pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
  483. of_node_full_name(of_node), irq_base, (int)hwirq_base, count);
  484. for (i = 0; i < count; i++) {
  485. irq_domain_associate(domain, irq_base + i, hwirq_base + i);
  486. }
  487. }
  488. EXPORT_SYMBOL_GPL(irq_domain_associate_many);
  489. /**
  490. * irq_create_direct_mapping() - Allocate an irq for direct mapping
  491. * @domain: domain to allocate the irq for or NULL for default domain
  492. *
  493. * This routine is used for irq controllers which can choose the hardware
  494. * interrupt numbers they generate. In such a case it's simplest to use
  495. * the linux irq as the hardware interrupt number. It still uses the linear
  496. * or radix tree to store the mapping, but the irq controller can optimize
  497. * the revmap path by using the hwirq directly.
  498. */
  499. unsigned int irq_create_direct_mapping(struct irq_domain *domain)
  500. {
  501. struct device_node *of_node;
  502. unsigned int virq;
  503. if (domain == NULL)
  504. domain = irq_default_domain;
  505. of_node = irq_domain_get_of_node(domain);
  506. virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
  507. if (!virq) {
  508. pr_debug("create_direct virq allocation failed\n");
  509. return 0;
  510. }
  511. if (virq >= domain->revmap_direct_max_irq) {
  512. pr_err("ERROR: no free irqs available below %i maximum\n",
  513. domain->revmap_direct_max_irq);
  514. irq_free_desc(virq);
  515. return 0;
  516. }
  517. pr_debug("create_direct obtained virq %d\n", virq);
  518. if (irq_domain_associate(domain, virq, virq)) {
  519. irq_free_desc(virq);
  520. return 0;
  521. }
  522. return virq;
  523. }
  524. EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	struct device_node *of_node;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	of_node = irq_domain_get_of_node(domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	/* On association failure, release the descriptor we just allocated. */
	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		hwirq, of_node_full_name(of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
  571. /**
  572. * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
  573. * @domain: domain owning the interrupt range
  574. * @irq_base: beginning of linux IRQ range
  575. * @hwirq_base: beginning of hardware IRQ range
  576. * @count: Number of interrupts to map
  577. *
  578. * This routine is used for allocating and mapping a range of hardware
  579. * irqs to linux irqs where the linux irq numbers are at pre-defined
  580. * locations. For use by controllers that already have static mappings
  581. * to insert in to the domain.
  582. *
  583. * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
  584. * domain insertion.
  585. *
  586. * 0 is returned upon success, while any failure to establish a static
  587. * mapping is treated as an error.
  588. */
  589. int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
  590. irq_hw_number_t hwirq_base, int count)
  591. {
  592. struct device_node *of_node;
  593. int ret;
  594. of_node = irq_domain_get_of_node(domain);
  595. ret = irq_alloc_descs(irq_base, irq_base, count,
  596. of_node_to_nid(of_node));
  597. if (unlikely(ret < 0))
  598. return ret;
  599. irq_domain_associate_many(domain, irq_base, hwirq_base, count);
  600. return 0;
  601. }
  602. EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
/*
 * irq_domain_translate() - decode an irq specifier into (hwirq, type)
 * @d: domain to translate against
 * @fwspec: firmware interrupt specifier
 * @hwirq: out: decoded hardware irq number
 * @type: out: decoded trigger type (only written by the callbacks)
 *
 * Prefers the hierarchy ->translate callback, then the legacy OF ->xlate.
 * Returns 0 on success or the callback's error code.
 */
static int irq_domain_translate(struct irq_domain *d,
				struct irq_fwspec *fwspec,
				irq_hw_number_t *hwirq, unsigned int *type)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	if (d->ops->translate)
		return d->ops->translate(d, fwspec, hwirq, type);
#endif
	if (d->ops->xlate)
		return d->ops->xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);

	/* If domain has no translation, then we assume interrupt line */
	/* NOTE(review): assumes param_count >= 1; *type is left untouched
	 * here, so callers must pre-initialize it (they set IRQ_TYPE_NONE). */
	*hwirq = fwspec->param[0];
	return 0;
}
  619. static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
  620. struct irq_fwspec *fwspec)
  621. {
  622. int i;
  623. fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
  624. fwspec->param_count = irq_data->args_count;
  625. for (i = 0; i < irq_data->args_count; i++)
  626. fwspec->param[i] = irq_data->args[i];
  627. }
/*
 * irq_create_fwspec_mapping() - map an interrupt described by a fwspec
 * @fwspec: firmware interrupt specifier
 *
 * Finds (or falls back to the default) domain, translates the specifier
 * to (hwirq, type), reuses an existing mapping when the trigger types are
 * compatible, otherwise allocates a new one (hierarchy-aware) and stores
 * the trigger type. Returns the virq, or 0 on any failure.
 */
unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
	struct irq_domain *domain;
	struct irq_data *irq_data;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int virq;

	/* Prefer an exact WIRED match, then any domain for this fwnode. */
	if (fwspec->fwnode) {
		domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
		if (!domain)
			domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
	} else {
		domain = irq_default_domain;
	}

	if (!domain) {
		pr_warn("no irq domain found for %s !\n",
			of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_translate(domain, fwspec, &hwirq, &type))
		return 0;

	/*
	 * WARN if the irqchip returns a type with bits
	 * outside the sense mask set and clear these bits.
	 */
	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
		type &= IRQ_TYPE_SENSE_MASK;

	/*
	 * If we've already configured this interrupt,
	 * don't do it again, or hell will break loose.
	 */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		/*
		 * If the trigger type is not specified or matches the
		 * current trigger type then we are done so return the
		 * interrupt number.
		 */
		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
			return virq;

		/*
		 * If the trigger type has not been set yet, then set
		 * it now and return the interrupt number.
		 */
		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
			irq_data = irq_get_irq_data(virq);
			if (!irq_data)
				return 0;

			irqd_set_trigger_type(irq_data, type);
			return virq;
		}

		/* Conflicting trigger types on an existing mapping: refuse. */
		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_is_hierarchy(domain)) {
		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
		if (virq <= 0)
			return 0;
	} else {
		/* Create mapping */
		virq = irq_create_mapping(domain, hwirq);
		if (!virq)
			return virq;
	}

	irq_data = irq_get_irq_data(virq);
	if (!irq_data) {
		/* Undo the allocation matching whichever path created it. */
		if (irq_domain_is_hierarchy(domain))
			irq_domain_free_irqs(virq, 1);
		else
			irq_dispose_mapping(virq);
		return 0;
	}

	/* Store trigger type */
	irqd_set_trigger_type(irq_data, type);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
/**
 * irq_create_of_mapping() - Map a hardware interrupt described by DT
 * @irq_data: Device Tree interrupt specifier (controller phandle + args)
 *
 * Converts the OF phandle arguments into a generic irq_fwspec and hands it
 * to irq_create_fwspec_mapping(). Returns the linux irq number, or 0 on
 * failure (same contract as irq_create_fwspec_mapping()).
 */
unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
{
	struct irq_fwspec fwspec;

	of_phandle_args_to_fwspec(irq_data, &fwspec);
	return irq_create_fwspec_mapping(&fwspec);
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	/*
	 * Hierarchical domains free the whole allocation through the
	 * hierarchy; flat domains just drop the hwirq association and
	 * release the descriptor.
	 */
	if (irq_domain_is_hierarchy(domain)) {
		irq_domain_free_irqs(virq, 1);
	} else {
		irq_domain_disassociate(domain, virq);
		irq_free_desc(virq);
	}
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * Returns the linux irq mapped to @hwirq, or 0 when no mapping exists.
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	/* Direct-mapped range: the linux irq number equals the hwirq. */
	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_domain_get_irq_data(domain, hwirq);
		if (data && data->hwirq == hwirq)
			return hwirq;
	}

	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		return domain->linear_revmap[hwirq];

	/* Otherwise fall back to the radix tree for sparse hwirq numbers. */
	rcu_read_lock();
	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
	rcu_read_unlock();
	return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
  762. #ifdef CONFIG_IRQ_DOMAIN_DEBUG
  763. static void virq_debug_show_one(struct seq_file *m, struct irq_desc *desc)
  764. {
  765. struct irq_domain *domain;
  766. struct irq_data *data;
  767. domain = desc->irq_data.domain;
  768. data = &desc->irq_data;
  769. while (domain) {
  770. unsigned int irq = data->irq;
  771. unsigned long hwirq = data->hwirq;
  772. struct irq_chip *chip;
  773. bool direct;
  774. if (data == &desc->irq_data)
  775. seq_printf(m, "%5d ", irq);
  776. else
  777. seq_printf(m, "%5d+ ", irq);
  778. seq_printf(m, "0x%05lx ", hwirq);
  779. chip = irq_data_get_irq_chip(data);
  780. seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");
  781. seq_printf(m, data ? "0x%p " : " %p ",
  782. irq_data_get_irq_chip_data(data));
  783. seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
  784. direct = (irq == hwirq) && (irq < domain->revmap_direct_max_irq);
  785. seq_printf(m, "%6s%-8s ",
  786. (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
  787. direct ? "(DIRECT)" : "");
  788. seq_printf(m, "%s\n", domain->name);
  789. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  790. domain = domain->parent;
  791. data = data->parent_data;
  792. #else
  793. domain = NULL;
  794. #endif
  795. }
  796. }
/* Dump every irq domain, then every mapped irq, for debugfs. */
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	struct irq_domain *domain;
	struct radix_tree_iter iter;
	void **slot;
	int i;

	/* Section 1: one summary line per registered domain. */
	seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
		   "name", "mapped", "linear-max", "direct-max", "devtree-node");
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(domain, &irq_domain_list, link) {
		struct device_node *of_node;
		const char *name;
		int count = 0;

		of_node = irq_domain_get_of_node(domain);
		if (of_node)
			name = of_node_full_name(of_node);
		else if (is_fwnode_irqchip(domain->fwnode))
			name = container_of(domain->fwnode, struct irqchip_fwid,
					    fwnode)->name;
		else
			name = "";

		/* Count the radix-tree (sparse hwirq) mappings. */
		radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
			count++;
		seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
			   domain == irq_default_domain ? '*' : ' ', domain->name,
			   domain->revmap_size + count, domain->revmap_size,
			   domain->revmap_direct_max_irq,
			   name);
	}
	mutex_unlock(&irq_domain_mutex);

	/* Section 2: one line per mapped irq (per level, for hierarchies). */
	seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
		   "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		   "active", "type", "domain");
	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;
		raw_spin_lock_irqsave(&desc->lock, flags);
		virq_debug_show_one(m, desc);
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
	return 0;
}
/* seq_file boilerplate: bind virq_debug_show() to the opened file. */
static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}
/* File operations for the irq_domain_mapping debugfs file. */
static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
  852. static int __init irq_debugfs_init(void)
  853. {
  854. if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
  855. NULL, &virq_debug_fops) == NULL)
  856. return -ENOMEM;
  857. return 0;
  858. }
  859. __initcall(irq_debugfs_init);
  860. #endif /* CONFIG_IRQ_DOMAIN_DEBUG */
  861. /**
  862. * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
  863. *
  864. * Device Tree IRQ specifier translation function which works with one cell
  865. * bindings where the cell value maps directly to the hwirq number.
  866. */
  867. int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
  868. const u32 *intspec, unsigned int intsize,
  869. unsigned long *out_hwirq, unsigned int *out_type)
  870. {
  871. if (WARN_ON(intsize < 1))
  872. return -EINVAL;
  873. *out_hwirq = intspec[0];
  874. *out_type = IRQ_TYPE_NONE;
  875. return 0;
  876. }
  877. EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
  878. /**
  879. * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
  880. *
  881. * Device Tree IRQ specifier translation function which works with two cell
  882. * bindings where the cell values map directly to the hwirq number
  883. * and linux irq flags.
  884. */
  885. int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
  886. const u32 *intspec, unsigned int intsize,
  887. irq_hw_number_t *out_hwirq, unsigned int *out_type)
  888. {
  889. if (WARN_ON(intsize < 2))
  890. return -EINVAL;
  891. *out_hwirq = intspec[0];
  892. *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
  893. return 0;
  894. }
  895. EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
  896. /**
  897. * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
  898. *
  899. * Device Tree IRQ specifier translation function which works with either one
  900. * or two cell bindings where the cell values map directly to the hwirq number
  901. * and linux irq flags.
  902. *
  903. * Note: don't use this function unless your interrupt controller explicitly
  904. * supports both one and two cell bindings. For the majority of controllers
  905. * the _onecell() or _twocell() variants above should be used.
  906. */
  907. int irq_domain_xlate_onetwocell(struct irq_domain *d,
  908. struct device_node *ctrlr,
  909. const u32 *intspec, unsigned int intsize,
  910. unsigned long *out_hwirq, unsigned int *out_type)
  911. {
  912. if (WARN_ON(intsize < 1))
  913. return -EINVAL;
  914. *out_hwirq = intspec[0];
  915. if (intsize > 1)
  916. *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
  917. else
  918. *out_type = IRQ_TYPE_NONE;
  919. return 0;
  920. }
  921. EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
/*
 * Minimal default domain callbacks: translation only, accepting both
 * one-cell and two-cell Device Tree bindings.
 */
const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
  926. int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
  927. int node, const struct cpumask *affinity)
  928. {
  929. unsigned int hint;
  930. if (virq >= 0) {
  931. virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
  932. affinity);
  933. } else {
  934. hint = hwirq % nr_irqs;
  935. if (hint == 0)
  936. hint++;
  937. virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
  938. affinity);
  939. if (virq <= 0 && hint > 1) {
  940. virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
  941. affinity);
  942. }
  943. }
  944. return virq;
  945. }
  946. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  947. /**
  948. * irq_domain_create_hierarchy - Add a irqdomain into the hierarchy
  949. * @parent: Parent irq domain to associate with the new domain
  950. * @flags: Irq domain flags associated to the domain
  951. * @size: Size of the domain. See below
  952. * @fwnode: Optional fwnode of the interrupt controller
  953. * @ops: Pointer to the interrupt domain callbacks
  954. * @host_data: Controller private data pointer
  955. *
  956. * If @size is 0 a tree domain is created, otherwise a linear domain.
  957. *
  958. * If successful the parent is associated to the new domain and the
  959. * domain flags are set.
  960. * Returns pointer to IRQ domain, or NULL on failure.
  961. */
  962. struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
  963. unsigned int flags,
  964. unsigned int size,
  965. struct fwnode_handle *fwnode,
  966. const struct irq_domain_ops *ops,
  967. void *host_data)
  968. {
  969. struct irq_domain *domain;
  970. if (size)
  971. domain = irq_domain_create_linear(fwnode, size, ops, host_data);
  972. else
  973. domain = irq_domain_create_tree(fwnode, ops, host_data);
  974. if (domain) {
  975. domain->parent = parent;
  976. domain->flags |= flags;
  977. }
  978. return domain;
  979. }
  980. EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);
/*
 * Publish @virq in the reverse map of every domain in its hierarchy and
 * make it requestable. Called with irq_domain_mutex held (see
 * __irq_domain_alloc_irqs()).
 */
static void irq_domain_insert_irq(int virq)
{
	struct irq_data *data;

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		domain->mapcount++;
		/* Small hwirqs go in the linear map, large ones in the tree. */
		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = virq;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_insert(&domain->revmap_tree, hwirq, data);
			mutex_unlock(&revmap_trees_mutex);
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && data->chip)
			domain->name = data->chip->name;
	}

	irq_clear_status_flags(virq, IRQ_NOREQUEST);
}
/* Tear down @virq's mapping in every domain of its hierarchy. */
static void irq_domain_remove_irq(int virq)
{
	struct irq_data *data;

	/* Block new requests, detach the chip/handler and drain in-flight
	 * handlers before touching the reverse maps. */
	irq_set_status_flags(virq, IRQ_NOREQUEST);
	irq_set_chip_and_handler(virq, NULL, NULL);
	synchronize_irq(virq);
	smp_mb();

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		domain->mapcount--;
		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = 0;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_delete(&domain->revmap_tree, hwirq);
			mutex_unlock(&revmap_trees_mutex);
		}
	}
}
  1021. static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
  1022. struct irq_data *child)
  1023. {
  1024. struct irq_data *irq_data;
  1025. irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
  1026. irq_data_get_node(child));
  1027. if (irq_data) {
  1028. child->parent_data = irq_data;
  1029. irq_data->irq = child->irq;
  1030. irq_data->common = child->common;
  1031. irq_data->domain = domain;
  1032. }
  1033. return irq_data;
  1034. }
/* Free the parent irq_data chains of @nr_irqs consecutive interrupts. */
static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data, *tmp;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		tmp = irq_data->parent_data;
		/* Detach the outermost irq_data (embedded in irq_desc,
		 * so it is reset, not freed)... */
		irq_data->parent_data = NULL;
		irq_data->domain = NULL;

		/* ...then free every heap-allocated parent level. */
		while (tmp) {
			irq_data = tmp;
			tmp = tmp->parent_data;
			kfree(irq_data);
		}
	}
}
/*
 * Build the irq_data chain for each of @nr_irqs interrupts, one level per
 * parent domain. Returns 0, or -ENOMEM after undoing the chains built so
 * far.
 */
static int irq_domain_alloc_irq_data(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct irq_domain *parent;
	int i;

	/* The outermost irq_data is embedded in struct irq_desc */
	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		irq_data->domain = domain;

		for (parent = domain->parent; parent; parent = parent->parent) {
			irq_data = irq_domain_insert_irq_data(parent, irq_data);
			if (!irq_data) {
				/* i + 1: also unwind this irq's partial chain */
				irq_domain_free_irq_data(virq, i + 1);
				return -ENOMEM;
			}
		}
	}

	return 0;
}
  1071. /**
  1072. * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
  1073. * @domain: domain to match
  1074. * @virq: IRQ number to get irq_data
  1075. */
  1076. struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
  1077. unsigned int virq)
  1078. {
  1079. struct irq_data *irq_data;
  1080. for (irq_data = irq_get_irq_data(virq); irq_data;
  1081. irq_data = irq_data->parent_data)
  1082. if (irq_data->domain == domain)
  1083. return irq_data;
  1084. return NULL;
  1085. }
  1086. EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
  1087. /**
  1088. * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
  1089. * @domain: Interrupt domain to match
  1090. * @virq: IRQ number
  1091. * @hwirq: The hwirq number
  1092. * @chip: The associated interrupt chip
  1093. * @chip_data: The associated chip data
  1094. */
  1095. int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
  1096. irq_hw_number_t hwirq, struct irq_chip *chip,
  1097. void *chip_data)
  1098. {
  1099. struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
  1100. if (!irq_data)
  1101. return -ENOENT;
  1102. irq_data->hwirq = hwirq;
  1103. irq_data->chip = chip ? chip : &no_irq_chip;
  1104. irq_data->chip_data = chip_data;
  1105. return 0;
  1106. }
  1107. EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hardware interrupt number
 * @chip: The associated interrupt chip
 * @chip_data: The associated interrupt chip data
 * @handler: The interrupt flow handler
 * @handler_data: The interrupt flow handler data
 * @handler_name: The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
	__irq_set_handler(virq, handler, 0, handler_name);
	irq_set_handler_data(virq, handler_data);
}
EXPORT_SYMBOL(irq_domain_set_info);
/**
 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
 * @irq_data: The pointer to irq_data
 */
void irq_domain_reset_irq_data(struct irq_data *irq_data)
{
	irq_data->hwirq = 0;
	/* Park on the dummy chip rather than leaving a NULL pointer. */
	irq_data->chip = &no_irq_chip;
	irq_data->chip_data = NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
  1140. /**
  1141. * irq_domain_free_irqs_common - Clear irq_data and free the parent
  1142. * @domain: Interrupt domain to match
  1143. * @virq: IRQ number to start with
  1144. * @nr_irqs: The number of irqs to free
  1145. */
  1146. void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
  1147. unsigned int nr_irqs)
  1148. {
  1149. struct irq_data *irq_data;
  1150. int i;
  1151. for (i = 0; i < nr_irqs; i++) {
  1152. irq_data = irq_domain_get_irq_data(domain, virq + i);
  1153. if (irq_data)
  1154. irq_domain_reset_irq_data(irq_data);
  1155. }
  1156. irq_domain_free_irqs_parent(domain, virq, nr_irqs);
  1157. }
  1158. EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);
  1159. /**
  1160. * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
  1161. * @domain: Interrupt domain to match
  1162. * @virq: IRQ number to start with
  1163. * @nr_irqs: The number of irqs to free
  1164. */
  1165. void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
  1166. unsigned int nr_irqs)
  1167. {
  1168. int i;
  1169. for (i = 0; i < nr_irqs; i++) {
  1170. irq_set_handler_data(virq + i, NULL);
  1171. irq_set_handler(virq + i, NULL);
  1172. }
  1173. irq_domain_free_irqs_common(domain, virq, nr_irqs);
  1174. }
  1175. static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
  1176. unsigned int irq_base,
  1177. unsigned int nr_irqs)
  1178. {
  1179. domain->ops->free(domain, irq_base, nr_irqs);
  1180. }
/*
 * Invoke the domain's ->alloc() callback. Callers must ensure it is
 * non-NULL (__irq_domain_alloc_irqs() checks and returns -ENOSYS).
 */
int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
				    unsigned int irq_base,
				    unsigned int nr_irqs, void *arg)
{
	return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
}
/**
 * __irq_domain_alloc_irqs - Allocate IRQs from domain
 * @domain: domain to allocate from
 * @irq_base: allocate specified IRQ number if irq_base >= 0
 * @nr_irqs: number of IRQs to allocate
 * @node: NUMA node id for memory allocation
 * @arg: domain specific argument
 * @realloc: IRQ descriptors have already been allocated if true
 * @affinity: Optional irq affinity mask for multiqueue devices
 *
 * Allocate IRQ numbers and initialized all data structures to support
 * hierarchy IRQ domains.
 * Parameter @realloc is mainly to support legacy IRQs.
 * Returns error code or allocated IRQ number
 *
 * The whole process to setup an IRQ has been split into two steps.
 * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
 * descriptor and required hardware resources. The second step,
 * irq_domain_activate_irq(), is to program hardwares with preallocated
 * resources. In this way, it's easier to rollback when failing to
 * allocate resources.
 */
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
			    unsigned int nr_irqs, int node, void *arg,
			    bool realloc, const struct cpumask *affinity)
{
	int i, ret, virq;

	/* Fall back to the default domain when none was specified. */
	if (domain == NULL) {
		domain = irq_default_domain;
		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
			return -EINVAL;
	}

	if (!domain->ops->alloc) {
		pr_debug("domain->ops->alloc() is NULL\n");
		return -ENOSYS;
	}

	/* Reuse pre-existing descriptors (legacy IRQs) or allocate new. */
	if (realloc && irq_base >= 0) {
		virq = irq_base;
	} else {
		virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
					      affinity);
		if (virq < 0) {
			pr_debug("cannot allocate IRQ(base %d, count %d)\n",
				 irq_base, nr_irqs);
			return virq;
		}
	}

	if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
		pr_debug("cannot allocate memory for IRQ%d\n", virq);
		ret = -ENOMEM;
		goto out_free_desc;
	}

	mutex_lock(&irq_domain_mutex);
	ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
	if (ret < 0) {
		mutex_unlock(&irq_domain_mutex);
		goto out_free_irq_data;
	}
	/* Publish the new mappings in the reverse maps under the mutex. */
	for (i = 0; i < nr_irqs; i++)
		irq_domain_insert_irq(virq + i);
	mutex_unlock(&irq_domain_mutex);

	return virq;

out_free_irq_data:
	irq_domain_free_irq_data(virq, nr_irqs);
out_free_desc:
	irq_free_descs(virq, nr_irqs);
	return ret;
}
/**
 * irq_domain_free_irqs - Free IRQ number and associated data structures
 * @virq: base IRQ number
 * @nr_irqs: number of IRQs to free
 */
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_get_irq_data(virq);
	int i;

	if (WARN(!data || !data->domain || !data->domain->ops->free,
		 "NULL pointer, cannot free irq\n"))
		return;

	/* Unpublish the mappings, then free the hardware resources,
	 * all under irq_domain_mutex. */
	mutex_lock(&irq_domain_mutex);
	for (i = 0; i < nr_irqs; i++)
		irq_domain_remove_irq(virq + i);
	irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
	mutex_unlock(&irq_domain_mutex);

	/* Finally release the irq_data chains and the descriptors. */
	irq_domain_free_irq_data(virq, nr_irqs);
	irq_free_descs(virq, nr_irqs);
}
  1275. /**
  1276. * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
  1277. * @irq_base: Base IRQ number
  1278. * @nr_irqs: Number of IRQs to allocate
  1279. * @arg: Allocation data (arch/domain specific)
  1280. *
  1281. * Check whether the domain has been setup recursive. If not allocate
  1282. * through the parent domain.
  1283. */
  1284. int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
  1285. unsigned int irq_base, unsigned int nr_irqs,
  1286. void *arg)
  1287. {
  1288. if (!domain->parent)
  1289. return -ENOSYS;
  1290. return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
  1291. nr_irqs, arg);
  1292. }
  1293. EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
  1294. /**
  1295. * irq_domain_free_irqs_parent - Free interrupts from parent domain
  1296. * @irq_base: Base IRQ number
  1297. * @nr_irqs: Number of IRQs to free
  1298. *
  1299. * Check whether the domain has been setup recursive. If not free
  1300. * through the parent domain.
  1301. */
  1302. void irq_domain_free_irqs_parent(struct irq_domain *domain,
  1303. unsigned int irq_base, unsigned int nr_irqs)
  1304. {
  1305. if (!domain->parent)
  1306. return;
  1307. irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
  1308. }
  1309. EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
  1310. static void __irq_domain_activate_irq(struct irq_data *irq_data)
  1311. {
  1312. if (irq_data && irq_data->domain) {
  1313. struct irq_domain *domain = irq_data->domain;
  1314. if (irq_data->parent_data)
  1315. __irq_domain_activate_irq(irq_data->parent_data);
  1316. if (domain->ops->activate)
  1317. domain->ops->activate(domain, irq_data);
  1318. }
  1319. }
  1320. static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
  1321. {
  1322. if (irq_data && irq_data->domain) {
  1323. struct irq_domain *domain = irq_data->domain;
  1324. if (domain->ops->deactivate)
  1325. domain->ops->deactivate(domain, irq_data);
  1326. if (irq_data->parent_data)
  1327. __irq_domain_deactivate_irq(irq_data->parent_data);
  1328. }
  1329. }
/**
 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
 * interrupt
 * @irq_data: outermost irq_data associated with interrupt
 *
 * This is the second step to call domain_ops->activate to program interrupt
 * controllers, so the interrupt could actually get delivered.
 */
void irq_domain_activate_irq(struct irq_data *irq_data)
{
	/* The activated flag makes repeated activation a no-op. */
	if (!irqd_is_activated(irq_data)) {
		__irq_domain_activate_irq(irq_data);
		irqd_set_activated(irq_data);
	}
}
/**
 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
 * deactivate interrupt
 * @irq_data: outermost irq_data associated with interrupt
 *
 * It calls domain_ops->deactivate to program interrupt controllers to disable
 * interrupt delivery.
 */
void irq_domain_deactivate_irq(struct irq_data *irq_data)
{
	/* Only deactivate what was previously activated. */
	if (irqd_is_activated(irq_data)) {
		__irq_domain_deactivate_irq(irq_data);
		irqd_clr_activated(irq_data);
	}
}
/* Flag domains providing ->alloc() as hierarchy-capable. */
static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
	/* Hierarchy irq_domains must implement callback alloc() */
	if (domain->ops->alloc)
		domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
}
  1366. /**
  1367. * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
  1368. * parent has MSI remapping support
  1369. * @domain: domain pointer
  1370. */
  1371. bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
  1372. {
  1373. for (; domain; domain = domain->parent) {
  1374. if (irq_domain_is_msi_remap(domain))
  1375. return true;
  1376. }
  1377. return false;
  1378. }
  1379. #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
  1380. /**
  1381. * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
  1382. * @domain: domain to match
  1383. * @virq: IRQ number to get irq_data
  1384. */
  1385. struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
  1386. unsigned int virq)
  1387. {
  1388. struct irq_data *irq_data = irq_get_irq_data(virq);
  1389. return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
  1390. }
  1391. EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hardware interrupt number
 * @chip: The associated interrupt chip
 * @chip_data: The associated interrupt chip data
 * @handler: The interrupt flow handler
 * @handler_data: The interrupt flow handler data
 * @handler_name: The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	/* Non-hierarchy variant: @domain and @hwirq are unused here. */
	irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
	irq_set_chip_data(virq, chip_data);
	irq_set_handler_data(virq, handler_data);
}
/* No hierarchy support configured: nothing to check or flag. */
static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
}
  1415. #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
  1416. #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/* debugfs "domains" directory, created by irq_domain_debugfs_init(). */
static struct dentry *domain_dir;
  1418. static void
  1419. irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
  1420. {
  1421. seq_printf(m, "%*sname: %s\n", ind, "", d->name);
  1422. seq_printf(m, "%*ssize: %u\n", ind + 1, "",
  1423. d->revmap_size + d->revmap_direct_max_irq);
  1424. seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
  1425. seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags);
  1426. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  1427. if (!d->parent)
  1428. return;
  1429. seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
  1430. irq_domain_debug_show_one(m, d->parent, ind + 4);
  1431. #endif
  1432. }
  1433. static int irq_domain_debug_show(struct seq_file *m, void *p)
  1434. {
  1435. struct irq_domain *d = m->private;
  1436. /* Default domain? Might be NULL */
  1437. if (!d) {
  1438. if (!irq_default_domain)
  1439. return 0;
  1440. d = irq_default_domain;
  1441. }
  1442. irq_domain_debug_show_one(m, d, 0);
  1443. return 0;
  1444. }
/* seq_file open: i_private carries the domain (NULL for "default"). */
static int irq_domain_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_domain_debug_show, inode->i_private);
}
/* File operations for the per-domain debugfs files. */
static const struct file_operations dfs_domain_ops = {
	.open = irq_domain_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* Create the per-domain debugfs file; only once, for named domains,
 * and only after the "domains" directory exists. */
static void debugfs_add_domain_dir(struct irq_domain *d)
{
	if (!d->name || !domain_dir || d->debugfs_file)
		return;
	d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
					      &dfs_domain_ops);
}
/* Remove a domain's debugfs file (debugfs_remove is a no-op on NULL). */
static void debugfs_remove_domain_dir(struct irq_domain *d)
{
	debugfs_remove(d->debugfs_file);
}
/* Create the "domains" debugfs directory under @root and populate it
 * with a "default" entry plus one file per currently registered domain. */
void __init irq_domain_debugfs_init(struct dentry *root)
{
	struct irq_domain *d;

	domain_dir = debugfs_create_dir("domains", root);
	if (!domain_dir)
		return;

	debugfs_create_file("default", 0444, domain_dir, NULL, &dfs_domain_ops);
	/* Walk the registry under the mutex so entries can't disappear. */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(d, &irq_domain_list, link)
		debugfs_add_domain_dir(d);
	mutex_unlock(&irq_domain_mutex);
}
  1478. #endif