irqdomain.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319
  1. #define pr_fmt(fmt) "irq: " fmt
  2. #include <linux/debugfs.h>
  3. #include <linux/hardirq.h>
  4. #include <linux/interrupt.h>
  5. #include <linux/irq.h>
  6. #include <linux/irqdesc.h>
  7. #include <linux/irqdomain.h>
  8. #include <linux/module.h>
  9. #include <linux/mutex.h>
  10. #include <linux/of.h>
  11. #include <linux/of_address.h>
  12. #include <linux/of_irq.h>
  13. #include <linux/topology.h>
  14. #include <linux/seq_file.h>
  15. #include <linux/slab.h>
  16. #include <linux/smp.h>
  17. #include <linux/fs.h>
/* All registered irq domains, protected by irq_domain_mutex. */
static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

/* Serializes updates to the radix-tree reverse maps of every domain. */
static DEFINE_MUTEX(revmap_trees_mutex);

/* Domain used when NULL is passed to irq_create_mapping() and friends. */
static struct irq_domain *irq_default_domain;

static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
				  irq_hw_number_t hwirq, int node);
static void irq_domain_check_hierarchy(struct irq_domain *domain);
/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @of_node: optional device-tree node of the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct irq_domain *domain;
	struct fwnode_handle *fwnode;

	/*
	 * The linear revmap is a tail array of @size entries; allocate the
	 * whole thing on the NUMA node of the controller's of_node.
	 */
	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	/* Hold a reference on the of_node for the domain's lifetime. */
	of_node_get(of_node);
	fwnode = of_node ? &of_node->fwnode : NULL;

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->fwnode = fwnode;
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;
	irq_domain_check_hierarchy(domain);

	/* Publish the domain on the global list. */
	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);
/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

	/*
	 * radix_tree_delete() takes care of destroying the root
	 * node when all entries are removed. Shout if there are
	 * any mappings left.
	 */
	WARN_ON(domain->revmap_tree.height);

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	/* Drop the of_node reference taken in __irq_domain_add(). */
	of_node_put(irq_domain_get_of_node(domain));
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *	pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain, and optionally if first_irq is positive then also
 * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node, size, size, 0, ops, host_data);
	if (!domain)
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(of_node));
			/* Failure is tolerated: descs may already exist. */
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		/* Identity-map hwirqs [0, size) onto virqs from first_irq. */
		irq_domain_associate_many(domain, first_irq, 0, size);
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
  138. /**
  139. * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
  140. * @of_node: pointer to interrupt controller's device tree node.
  141. * @size: total number of irqs in legacy mapping
  142. * @first_irq: first number of irq block assigned to the domain
  143. * @first_hwirq: first hwirq number to use for the translation. Should normally
  144. * be '0', but a positive integer can be used if the effective
  145. * hwirqs numbering does not begin at zero.
  146. * @ops: map/unmap domain callbacks
  147. * @host_data: Controller private data pointer
  148. *
  149. * Note: the map() callback will be called before this function returns
  150. * for all legacy interrupts except 0 (which is always the invalid irq for
  151. * a legacy controller).
  152. */
  153. struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
  154. unsigned int size,
  155. unsigned int first_irq,
  156. irq_hw_number_t first_hwirq,
  157. const struct irq_domain_ops *ops,
  158. void *host_data)
  159. {
  160. struct irq_domain *domain;
  161. domain = __irq_domain_add(of_node, first_hwirq + size,
  162. first_hwirq + size, 0, ops, host_data);
  163. if (domain)
  164. irq_domain_associate_many(domain, first_irq, first_hwirq, size);
  165. return domain;
  166. }
  167. EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
/**
 * irq_find_matching_fwnode() - Locates a domain for a given fwnode
 * @fwnode: FW descriptor of the interrupt controller
 * @bus_token: domain-specific data
 *
 * Returns the first registered domain matching @fwnode/@bus_token,
 * or NULL if none matches.
 */
struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
					    enum irq_domain_bus_token bus_token)
{
	struct irq_domain *h, *found = NULL;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 *
	 * bus_token == DOMAIN_BUS_ANY matches any domain, any other
	 * values must generate an exact match for the domain to be
	 * selected.
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		/* A domain's own match() callback takes precedence. */
		if (h->ops->match)
			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
		else
			rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
			      ((bus_token == DOMAIN_BUS_ANY) ||
			       (h->bus_token == bus_token)));

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwnode);
/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 *
 * NOTE(review): the store is not protected by irq_domain_mutex here;
 * presumably callers only set this during early boot — confirm.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);
/**
 * irq_domain_disassociate() - Unbind a linux irq from its domain/hwirq
 * @domain: domain the irq is expected to belong to
 * @irq: linux irq number to tear down
 *
 * Reverses irq_domain_associate(): blocks new requests, detaches the
 * chip/handler, lets the controller's ->unmap() run, and clears the
 * reverse mapping. No-op (with a warning) if @irq is not mapped in
 * @domain.
 */
void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	hwirq = irq_data->hwirq;
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);
	/* NOTE(review): barrier presumably orders ->unmap() against the
	 * clearing below for concurrent lookups — confirm intent. */
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;

	/* Clear reverse map for this hwirq */
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = 0;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&domain->revmap_tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
	}
}
/**
 * irq_domain_associate() - Bind a linux irq number to a hwirq in a domain
 * @domain: domain owning the mapping
 * @virq: linux irq number (its descriptor must already be allocated)
 * @hwirq: hardware irq number within @domain's number space
 *
 * Returns 0 on success, -EINVAL on invalid arguments, or the error
 * returned by the domain's ->map() callback.
 */
int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
					domain->name, hwirq, virq, ret);
			}
			/* Roll back the partial association on failure. */
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	/* Record the reverse map: linear array if it fits, radix otherwise. */
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = virq;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
		mutex_unlock(&revmap_trees_mutex);
	}
	mutex_unlock(&irq_domain_mutex);

	/* The irq is mapped; drivers may now request it. */
	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);
  295. void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
  296. irq_hw_number_t hwirq_base, int count)
  297. {
  298. struct device_node *of_node;
  299. int i;
  300. of_node = irq_domain_get_of_node(domain);
  301. pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
  302. of_node_full_name(of_node), irq_base, (int)hwirq_base, count);
  303. for (i = 0; i < count; i++) {
  304. irq_domain_associate(domain, irq_base + i, hwirq_base + i);
  305. }
  306. }
  307. EXPORT_SYMBOL_GPL(irq_domain_associate_many);
/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	struct device_node *of_node;
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	of_node = irq_domain_get_of_node(domain);
	/* Start at 1 so irq 0 (the invalid irq) is never handed out. */
	virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	/* Direct mappings require virq == hwirq below the configured max. */
	if (virq >= domain->revmap_direct_max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			domain->revmap_direct_max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	struct device_node *of_node;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	of_node = irq_domain_get_of_node(domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node));
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		/* Association failed: release the descriptor again. */
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		hwirq, of_node_full_name(of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
  390. /**
  391. * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
  392. * @domain: domain owning the interrupt range
  393. * @irq_base: beginning of linux IRQ range
  394. * @hwirq_base: beginning of hardware IRQ range
  395. * @count: Number of interrupts to map
  396. *
  397. * This routine is used for allocating and mapping a range of hardware
  398. * irqs to linux irqs where the linux irq numbers are at pre-defined
  399. * locations. For use by controllers that already have static mappings
  400. * to insert in to the domain.
  401. *
  402. * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
  403. * domain insertion.
  404. *
  405. * 0 is returned upon success, while any failure to establish a static
  406. * mapping is treated as an error.
  407. */
  408. int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
  409. irq_hw_number_t hwirq_base, int count)
  410. {
  411. struct device_node *of_node;
  412. int ret;
  413. of_node = irq_domain_get_of_node(domain);
  414. ret = irq_alloc_descs(irq_base, irq_base, count,
  415. of_node_to_nid(of_node));
  416. if (unlikely(ret < 0))
  417. return ret;
  418. irq_domain_associate_many(domain, irq_base, hwirq_base, count);
  419. return 0;
  420. }
  421. EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
/*
 * Translate a firmware interrupt specifier into a (hwirq, type) pair.
 * Prefers the hierarchy-capable ->translate() callback, then falls back
 * to the OF-style ->xlate(); with neither, the first parameter cell is
 * taken as the hwirq and *type is left untouched.
 */
static int irq_domain_translate(struct irq_domain *d,
				struct irq_fwspec *fwspec,
				irq_hw_number_t *hwirq, unsigned int *type)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	if (d->ops->translate)
		return d->ops->translate(d, fwspec, hwirq, type);
#endif
	if (d->ops->xlate)
		return d->ops->xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);

	/* If domain has no translation, then we assume interrupt line */
	*hwirq = fwspec->param[0];
	return 0;
}
  438. static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
  439. struct irq_fwspec *fwspec)
  440. {
  441. int i;
  442. fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
  443. fwspec->param_count = irq_data->args_count;
  444. for (i = 0; i < irq_data->args_count; i++)
  445. fwspec->param[i] = irq_data->args[i];
  446. }
/*
 * irq_create_of_mapping() - Create a linux irq from a DT interrupt specifier
 * @irq_data: OF phandle args naming the controller and the specifier cells
 *
 * Converts the phandle args to an irq_fwspec, locates the owning domain,
 * translates the specifier, creates (or reuses) the mapping and applies
 * the trigger type. Returns the linux irq number, or 0 on failure.
 */
unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
{
	struct irq_fwspec fwspec;
	struct irq_domain *domain;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int virq;

	of_phandle_args_to_fwspec(irq_data, &fwspec);

	if (fwspec.fwnode)
		domain = irq_find_matching_fwnode(fwspec.fwnode, DOMAIN_BUS_ANY);
	else
		domain = irq_default_domain;

	if (!domain) {
		pr_warn("no irq domain found for %s !\n",
			of_node_full_name(to_of_node(fwspec.fwnode)));
		return 0;
	}

	if (irq_domain_translate(domain, &fwspec, &hwirq, &type))
		return 0;

	if (irq_domain_is_hierarchy(domain)) {
		/* Temporary hack */
		void *desc = &fwspec;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/* Old-style domains get the raw OF args instead. */
		if (!domain->ops->translate)
			desc = irq_data;
#endif

		/*
		 * If we've already configured this interrupt,
		 * don't do it again, or hell will break loose.
		 */
		virq = irq_find_mapping(domain, hwirq);
		if (virq)
			return virq;

		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, desc);
		if (virq <= 0)
			return 0;
	} else {
		/* Create mapping */
		virq = irq_create_mapping(domain, hwirq);
		if (!virq)
			return virq;
	}

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != irq_get_trigger_type(virq))
		irq_set_irq_type(virq, type);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
  496. /**
  497. * irq_dispose_mapping() - Unmap an interrupt
  498. * @virq: linux irq number of the interrupt to unmap
  499. */
  500. void irq_dispose_mapping(unsigned int virq)
  501. {
  502. struct irq_data *irq_data = irq_get_irq_data(virq);
  503. struct irq_domain *domain;
  504. if (!virq || !irq_data)
  505. return;
  506. domain = irq_data->domain;
  507. if (WARN_ON(domain == NULL))
  508. return;
  509. irq_domain_disassociate(domain, virq);
  510. irq_free_desc(virq);
  511. }
  512. EXPORT_SYMBOL_GPL(irq_dispose_mapping);
/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * Returns the linux irq mapped to @hwirq, or 0 if no mapping exists.
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	/* Direct mappings use hwirq == virq; verify before trusting it. */
	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_domain_get_irq_data(domain, hwirq);
		if (data && data->hwirq == hwirq)
			return hwirq;
	}

	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		return domain->linear_revmap[hwirq];

	/* Otherwise fall back to the radix tree for large hwirqs. */
	rcu_read_lock();
	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
	rcu_read_unlock();
	return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
#ifdef CONFIG_IRQ_DOMAIN_DEBUG
/*
 * Dump every irq domain and every mapped linux irq to a seq_file.
 * Exposed through debugfs as "irq_domain_mapping".
 */
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	struct irq_domain *domain;
	struct radix_tree_iter iter;
	void *data, **slot;
	int i;

	/* First table: one row per registered domain. */
	seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
		   "name", "mapped", "linear-max", "direct-max", "devtree-node");
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(domain, &irq_domain_list, link) {
		struct device_node *of_node;
		int count = 0;

		of_node = irq_domain_get_of_node(domain);

		/* Count the radix-tree (non-linear) mappings. */
		radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
			count++;

		seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
			   domain == irq_default_domain ? '*' : ' ', domain->name,
			   domain->revmap_size + count, domain->revmap_size,
			   domain->revmap_direct_max_irq,
			   of_node ? of_node_full_name(of_node) : "");
	}
	mutex_unlock(&irq_domain_mutex);

	/* Second table: one row per mapped irq; irq 0 is never valid. */
	seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
		   "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		   "active", "type", "domain");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);
		domain = desc->irq_data.domain;

		if (domain) {
			struct irq_chip *chip;
			int hwirq = desc->irq_data.hwirq;
			bool direct;

			seq_printf(m, "%5d ", i);
			seq_printf(m, "0x%05x ", hwirq);

			chip = irq_desc_get_chip(desc);
			seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, data ? "0x%p " : " %p ", data);

			/* '*' marks irqs that have an action installed. */
			seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
			direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
			seq_printf(m, "%6s%-8s ",
				   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
				   direct ? "(DIRECT)" : "");
			seq_printf(m, "%s\n", desc->irq_data.domain->name);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	/* Read-only file; no private data needed, the show walks globals. */
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				 NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
  615. /**
  616. * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
  617. *
  618. * Device Tree IRQ specifier translation function which works with one cell
  619. * bindings where the cell value maps directly to the hwirq number.
  620. */
  621. int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
  622. const u32 *intspec, unsigned int intsize,
  623. unsigned long *out_hwirq, unsigned int *out_type)
  624. {
  625. if (WARN_ON(intsize < 1))
  626. return -EINVAL;
  627. *out_hwirq = intspec[0];
  628. *out_type = IRQ_TYPE_NONE;
  629. return 0;
  630. }
  631. EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
  632. /**
  633. * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
  634. *
  635. * Device Tree IRQ specifier translation function which works with two cell
  636. * bindings where the cell values map directly to the hwirq number
  637. * and linux irq flags.
  638. */
  639. int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
  640. const u32 *intspec, unsigned int intsize,
  641. irq_hw_number_t *out_hwirq, unsigned int *out_type)
  642. {
  643. if (WARN_ON(intsize < 2))
  644. return -EINVAL;
  645. *out_hwirq = intspec[0];
  646. *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
  647. return 0;
  648. }
  649. EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
  650. /**
  651. * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
  652. *
  653. * Device Tree IRQ specifier translation function which works with either one
  654. * or two cell bindings where the cell values map directly to the hwirq number
  655. * and linux irq flags.
  656. *
  657. * Note: don't use this function unless your interrupt controller explicitly
  658. * supports both one and two cell bindings. For the majority of controllers
  659. * the _onecell() or _twocell() variants above should be used.
  660. */
  661. int irq_domain_xlate_onetwocell(struct irq_domain *d,
  662. struct device_node *ctrlr,
  663. const u32 *intspec, unsigned int intsize,
  664. unsigned long *out_hwirq, unsigned int *out_type)
  665. {
  666. if (WARN_ON(intsize < 1))
  667. return -EINVAL;
  668. *out_hwirq = intspec[0];
  669. *out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
  670. return 0;
  671. }
  672. EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
/* Minimal domain ops: translation only, for trivially-mapped controllers. */
const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
  677. static int irq_domain_alloc_descs(int virq, unsigned int cnt,
  678. irq_hw_number_t hwirq, int node)
  679. {
  680. unsigned int hint;
  681. if (virq >= 0) {
  682. virq = irq_alloc_descs(virq, virq, cnt, node);
  683. } else {
  684. hint = hwirq % nr_irqs;
  685. if (hint == 0)
  686. hint++;
  687. virq = irq_alloc_descs_from(hint, cnt, node);
  688. if (virq <= 0 && hint > 1)
  689. virq = irq_alloc_descs_from(1, cnt, node);
  690. }
  691. return virq;
  692. }
  693. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  694. /**
  695. * irq_domain_add_hierarchy - Add a irqdomain into the hierarchy
  696. * @parent: Parent irq domain to associate with the new domain
  697. * @flags: Irq domain flags associated to the domain
  698. * @size: Size of the domain. See below
  699. * @node: Optional device-tree node of the interrupt controller
  700. * @ops: Pointer to the interrupt domain callbacks
  701. * @host_data: Controller private data pointer
  702. *
  703. * If @size is 0 a tree domain is created, otherwise a linear domain.
  704. *
  705. * If successful the parent is associated to the new domain and the
  706. * domain flags are set.
  707. * Returns pointer to IRQ domain, or NULL on failure.
  708. */
  709. struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
  710. unsigned int flags,
  711. unsigned int size,
  712. struct device_node *node,
  713. const struct irq_domain_ops *ops,
  714. void *host_data)
  715. {
  716. struct irq_domain *domain;
  717. if (size)
  718. domain = irq_domain_add_linear(node, size, ops, host_data);
  719. else
  720. domain = irq_domain_add_tree(node, ops, host_data);
  721. if (domain) {
  722. domain->parent = parent;
  723. domain->flags |= flags;
  724. }
  725. return domain;
  726. }
  727. static void irq_domain_insert_irq(int virq)
  728. {
  729. struct irq_data *data;
  730. for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
  731. struct irq_domain *domain = data->domain;
  732. irq_hw_number_t hwirq = data->hwirq;
  733. if (hwirq < domain->revmap_size) {
  734. domain->linear_revmap[hwirq] = virq;
  735. } else {
  736. mutex_lock(&revmap_trees_mutex);
  737. radix_tree_insert(&domain->revmap_tree, hwirq, data);
  738. mutex_unlock(&revmap_trees_mutex);
  739. }
  740. /* If not already assigned, give the domain the chip's name */
  741. if (!domain->name && data->chip)
  742. domain->name = data->chip->name;
  743. }
  744. irq_clear_status_flags(virq, IRQ_NOREQUEST);
  745. }
  746. static void irq_domain_remove_irq(int virq)
  747. {
  748. struct irq_data *data;
  749. irq_set_status_flags(virq, IRQ_NOREQUEST);
  750. irq_set_chip_and_handler(virq, NULL, NULL);
  751. synchronize_irq(virq);
  752. smp_mb();
  753. for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
  754. struct irq_domain *domain = data->domain;
  755. irq_hw_number_t hwirq = data->hwirq;
  756. if (hwirq < domain->revmap_size) {
  757. domain->linear_revmap[hwirq] = 0;
  758. } else {
  759. mutex_lock(&revmap_trees_mutex);
  760. radix_tree_delete(&domain->revmap_tree, hwirq);
  761. mutex_unlock(&revmap_trees_mutex);
  762. }
  763. }
  764. }
  765. static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
  766. struct irq_data *child)
  767. {
  768. struct irq_data *irq_data;
  769. irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
  770. irq_data_get_node(child));
  771. if (irq_data) {
  772. child->parent_data = irq_data;
  773. irq_data->irq = child->irq;
  774. irq_data->common = child->common;
  775. irq_data->domain = domain;
  776. }
  777. return irq_data;
  778. }
  779. static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
  780. {
  781. struct irq_data *irq_data, *tmp;
  782. int i;
  783. for (i = 0; i < nr_irqs; i++) {
  784. irq_data = irq_get_irq_data(virq + i);
  785. tmp = irq_data->parent_data;
  786. irq_data->parent_data = NULL;
  787. irq_data->domain = NULL;
  788. while (tmp) {
  789. irq_data = tmp;
  790. tmp = tmp->parent_data;
  791. kfree(irq_data);
  792. }
  793. }
  794. }
  795. static int irq_domain_alloc_irq_data(struct irq_domain *domain,
  796. unsigned int virq, unsigned int nr_irqs)
  797. {
  798. struct irq_data *irq_data;
  799. struct irq_domain *parent;
  800. int i;
  801. /* The outermost irq_data is embedded in struct irq_desc */
  802. for (i = 0; i < nr_irqs; i++) {
  803. irq_data = irq_get_irq_data(virq + i);
  804. irq_data->domain = domain;
  805. for (parent = domain->parent; parent; parent = parent->parent) {
  806. irq_data = irq_domain_insert_irq_data(parent, irq_data);
  807. if (!irq_data) {
  808. irq_domain_free_irq_data(virq, i + 1);
  809. return -ENOMEM;
  810. }
  811. }
  812. }
  813. return 0;
  814. }
  815. /**
  816. * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
  817. * @domain: domain to match
  818. * @virq: IRQ number to get irq_data
  819. */
  820. struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
  821. unsigned int virq)
  822. {
  823. struct irq_data *irq_data;
  824. for (irq_data = irq_get_irq_data(virq); irq_data;
  825. irq_data = irq_data->parent_data)
  826. if (irq_data->domain == domain)
  827. return irq_data;
  828. return NULL;
  829. }
  830. /**
  831. * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
  832. * @domain: Interrupt domain to match
  833. * @virq: IRQ number
  834. * @hwirq: The hwirq number
  835. * @chip: The associated interrupt chip
  836. * @chip_data: The associated chip data
  837. */
  838. int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
  839. irq_hw_number_t hwirq, struct irq_chip *chip,
  840. void *chip_data)
  841. {
  842. struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
  843. if (!irq_data)
  844. return -ENOENT;
  845. irq_data->hwirq = hwirq;
  846. irq_data->chip = chip ? chip : &no_irq_chip;
  847. irq_data->chip_data = chip_data;
  848. return 0;
  849. }
/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain:	Interrupt domain to match
 * @virq:	IRQ number
 * @hwirq:	The hardware interrupt number
 * @chip:	The associated interrupt chip
 * @chip_data:	The associated interrupt chip data
 * @handler:	The interrupt flow handler
 * @handler_data:	The interrupt flow handler data
 * @handler_name:	The interrupt handler name
 *
 * Convenience wrapper combining irq_domain_set_hwirq_and_chip() with the
 * flow-handler and handler-data setup.  The return value of
 * irq_domain_set_hwirq_and_chip() is intentionally not checked here;
 * callers are expected to pass a @virq that exists in @domain.
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
	__irq_set_handler(virq, handler, 0, handler_name);
	irq_set_handler_data(virq, handler_data);
}
  870. /**
  871. * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
  872. * @irq_data: The pointer to irq_data
  873. */
  874. void irq_domain_reset_irq_data(struct irq_data *irq_data)
  875. {
  876. irq_data->hwirq = 0;
  877. irq_data->chip = &no_irq_chip;
  878. irq_data->chip_data = NULL;
  879. }
  880. /**
  881. * irq_domain_free_irqs_common - Clear irq_data and free the parent
  882. * @domain: Interrupt domain to match
  883. * @virq: IRQ number to start with
  884. * @nr_irqs: The number of irqs to free
  885. */
  886. void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
  887. unsigned int nr_irqs)
  888. {
  889. struct irq_data *irq_data;
  890. int i;
  891. for (i = 0; i < nr_irqs; i++) {
  892. irq_data = irq_domain_get_irq_data(domain, virq + i);
  893. if (irq_data)
  894. irq_domain_reset_irq_data(irq_data);
  895. }
  896. irq_domain_free_irqs_parent(domain, virq, nr_irqs);
  897. }
  898. /**
  899. * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
  900. * @domain: Interrupt domain to match
  901. * @virq: IRQ number to start with
  902. * @nr_irqs: The number of irqs to free
  903. */
  904. void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
  905. unsigned int nr_irqs)
  906. {
  907. int i;
  908. for (i = 0; i < nr_irqs; i++) {
  909. irq_set_handler_data(virq + i, NULL);
  910. irq_set_handler(virq + i, NULL);
  911. }
  912. irq_domain_free_irqs_common(domain, virq, nr_irqs);
  913. }
  914. static bool irq_domain_is_auto_recursive(struct irq_domain *domain)
  915. {
  916. return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE;
  917. }
  918. static void irq_domain_free_irqs_recursive(struct irq_domain *domain,
  919. unsigned int irq_base,
  920. unsigned int nr_irqs)
  921. {
  922. domain->ops->free(domain, irq_base, nr_irqs);
  923. if (irq_domain_is_auto_recursive(domain)) {
  924. BUG_ON(!domain->parent);
  925. irq_domain_free_irqs_recursive(domain->parent, irq_base,
  926. nr_irqs);
  927. }
  928. }
  929. static int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
  930. unsigned int irq_base,
  931. unsigned int nr_irqs, void *arg)
  932. {
  933. int ret = 0;
  934. struct irq_domain *parent = domain->parent;
  935. bool recursive = irq_domain_is_auto_recursive(domain);
  936. BUG_ON(recursive && !parent);
  937. if (recursive)
  938. ret = irq_domain_alloc_irqs_recursive(parent, irq_base,
  939. nr_irqs, arg);
  940. if (ret >= 0)
  941. ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
  942. if (ret < 0 && recursive)
  943. irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs);
  944. return ret;
  945. }
/**
 * __irq_domain_alloc_irqs - Allocate IRQs from domain
 * @domain:	domain to allocate from
 * @irq_base:	allocate specified IRQ number if irq_base >= 0
 * @nr_irqs:	number of IRQs to allocate
 * @node:	NUMA node id for memory allocation
 * @arg:	domain specific argument
 * @realloc:	IRQ descriptors have already been allocated if true
 *
 * Allocate IRQ numbers and initialize all data structures to support
 * hierarchy IRQ domains.
 * Parameter @realloc is mainly to support legacy IRQs.
 * Returns error code or allocated IRQ number
 *
 * The whole process to setup an IRQ has been split into two steps.
 * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
 * descriptors and required hardware resources. The second step,
 * irq_domain_activate_irq(), is to program the hardware with the
 * preallocated resources. In this way, it's easier to roll back when
 * failing to allocate resources.
 */
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
			    unsigned int nr_irqs, int node, void *arg,
			    bool realloc)
{
	int i, ret, virq;

	/* NULL means "use the default domain", which must exist. */
	if (domain == NULL) {
		domain = irq_default_domain;
		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
			return -EINVAL;
	}

	/* Hierarchy allocation requires an ->alloc() callback. */
	if (!domain->ops->alloc) {
		pr_debug("domain->ops->alloc() is NULL\n");
		return -ENOSYS;
	}

	/* With @realloc, the caller already owns descriptors at @irq_base. */
	if (realloc && irq_base >= 0) {
		virq = irq_base;
	} else {
		virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node);
		if (virq < 0) {
			pr_debug("cannot allocate IRQ(base %d, count %d)\n",
				 irq_base, nr_irqs);
			return virq;
		}
	}

	/* Build the per-level irq_data chain for the whole hierarchy. */
	if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
		pr_debug("cannot allocate memory for IRQ%d\n", virq);
		ret = -ENOMEM;
		goto out_free_desc;
	}

	mutex_lock(&irq_domain_mutex);
	ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg);
	if (ret < 0) {
		mutex_unlock(&irq_domain_mutex);
		goto out_free_irq_data;
	}
	/* Publish the reverse mappings under the domain mutex. */
	for (i = 0; i < nr_irqs; i++)
		irq_domain_insert_irq(virq + i);
	mutex_unlock(&irq_domain_mutex);

	return virq;

out_free_irq_data:
	irq_domain_free_irq_data(virq, nr_irqs);
out_free_desc:
	irq_free_descs(virq, nr_irqs);
	return ret;
}
/**
 * irq_domain_free_irqs - Free IRQ number and associated data structures
 * @virq:	base IRQ number
 * @nr_irqs:	number of IRQs to free
 *
 * Counterpart of __irq_domain_alloc_irqs(): removes the reverse mappings,
 * runs the domain ->free() callbacks, then releases the irq_data chains
 * and the descriptors.  The domain is taken from the outermost irq_data
 * of @virq; the whole range is assumed to belong to the same domain.
 */
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_get_irq_data(virq);
	int i;

	if (WARN(!data || !data->domain || !data->domain->ops->free,
		 "NULL pointer, cannot free irq\n"))
		return;

	/* Unpublish mappings and run ->free() under the domain mutex. */
	mutex_lock(&irq_domain_mutex);
	for (i = 0; i < nr_irqs; i++)
		irq_domain_remove_irq(virq + i);
	irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs);
	mutex_unlock(&irq_domain_mutex);

	irq_domain_free_irq_data(virq, nr_irqs);
	irq_free_descs(virq, nr_irqs);
}
  1032. /**
  1033. * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
  1034. * @irq_base: Base IRQ number
  1035. * @nr_irqs: Number of IRQs to allocate
  1036. * @arg: Allocation data (arch/domain specific)
  1037. *
  1038. * Check whether the domain has been setup recursive. If not allocate
  1039. * through the parent domain.
  1040. */
  1041. int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
  1042. unsigned int irq_base, unsigned int nr_irqs,
  1043. void *arg)
  1044. {
  1045. /* irq_domain_alloc_irqs_recursive() has called parent's alloc() */
  1046. if (irq_domain_is_auto_recursive(domain))
  1047. return 0;
  1048. domain = domain->parent;
  1049. if (domain)
  1050. return irq_domain_alloc_irqs_recursive(domain, irq_base,
  1051. nr_irqs, arg);
  1052. return -ENOSYS;
  1053. }
  1054. /**
  1055. * irq_domain_free_irqs_parent - Free interrupts from parent domain
  1056. * @irq_base: Base IRQ number
  1057. * @nr_irqs: Number of IRQs to free
  1058. *
  1059. * Check whether the domain has been setup recursive. If not free
  1060. * through the parent domain.
  1061. */
  1062. void irq_domain_free_irqs_parent(struct irq_domain *domain,
  1063. unsigned int irq_base, unsigned int nr_irqs)
  1064. {
  1065. /* irq_domain_free_irqs_recursive() will call parent's free */
  1066. if (!irq_domain_is_auto_recursive(domain) && domain->parent)
  1067. irq_domain_free_irqs_recursive(domain->parent, irq_base,
  1068. nr_irqs);
  1069. }
  1070. /**
  1071. * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
  1072. * interrupt
  1073. * @irq_data: outermost irq_data associated with interrupt
  1074. *
  1075. * This is the second step to call domain_ops->activate to program interrupt
  1076. * controllers, so the interrupt could actually get delivered.
  1077. */
  1078. void irq_domain_activate_irq(struct irq_data *irq_data)
  1079. {
  1080. if (irq_data && irq_data->domain) {
  1081. struct irq_domain *domain = irq_data->domain;
  1082. if (irq_data->parent_data)
  1083. irq_domain_activate_irq(irq_data->parent_data);
  1084. if (domain->ops->activate)
  1085. domain->ops->activate(domain, irq_data);
  1086. }
  1087. }
  1088. /**
  1089. * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
  1090. * deactivate interrupt
  1091. * @irq_data: outermost irq_data associated with interrupt
  1092. *
  1093. * It calls domain_ops->deactivate to program interrupt controllers to disable
  1094. * interrupt delivery.
  1095. */
  1096. void irq_domain_deactivate_irq(struct irq_data *irq_data)
  1097. {
  1098. if (irq_data && irq_data->domain) {
  1099. struct irq_domain *domain = irq_data->domain;
  1100. if (domain->ops->deactivate)
  1101. domain->ops->deactivate(domain, irq_data);
  1102. if (irq_data->parent_data)
  1103. irq_domain_deactivate_irq(irq_data->parent_data);
  1104. }
  1105. }
  1106. static void irq_domain_check_hierarchy(struct irq_domain *domain)
  1107. {
  1108. /* Hierarchy irq_domains must implement callback alloc() */
  1109. if (domain->ops->alloc)
  1110. domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
  1111. }
  1112. #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
  1113. /**
  1114. * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
  1115. * @domain: domain to match
  1116. * @virq: IRQ number to get irq_data
  1117. */
  1118. struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
  1119. unsigned int virq)
  1120. {
  1121. struct irq_data *irq_data = irq_get_irq_data(virq);
  1122. return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
  1123. }
/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain:	Interrupt domain to match (unused in the non-hierarchy case)
 * @virq:	IRQ number
 * @hwirq:	The hardware interrupt number (unused in the non-hierarchy case)
 * @chip:	The associated interrupt chip
 * @chip_data:	The associated interrupt chip data
 * @handler:	The interrupt flow handler
 * @handler_data:	The interrupt flow handler data
 * @handler_name:	The interrupt handler name
 *
 * Non-hierarchy variant: configures chip, chip data, flow handler and
 * handler data directly on @virq.
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
	irq_set_chip_data(virq, chip_data);
	irq_set_handler_data(virq, handler_data);
}
/* No-op without CONFIG_IRQ_DOMAIN_HIERARCHY: no hierarchy flag to set. */
static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
}
  1147. #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */