/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"
#include "irqchip.h"

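/*
 * Driver-wide state: the distributor base, the base of each redistributor
 * region taken from the device tree, a per-CPU pointer to the redistributor
 * frame owned by each CPU, and the irq domain used for hwirq <-> Linux irq
 * translation.
 */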
struct gic_chip_data {
        void __iomem            *dist_base;
        void __iomem            **redist_base;
        void __iomem * __percpu *rdist;
        struct irq_domain       *domain;
        u64                     redist_stride;
        u32                     redist_regions;
        unsigned int            irq_nr;
};

static struct gic_chip_data gic_data __read_mostly;

#define gic_data_rdist()                (this_cpu_ptr(gic_data.rdist))
#define gic_data_rdist_rd_base()        (*gic_data_rdist())
#define gic_data_rdist_sgi_base()       (gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE       0xf0

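/*
 * hwirqs 0-15 are SGIs and 16-31 are PPIs; both are banked per CPU and
 * configured through this CPU's redistributor (its SGI_base frame) rather
 * than through the distributor.
 */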
static inline unsigned int gic_irq(struct irq_data *d)
{
        return d->hwirq;
}

static inline int gic_irq_in_rdist(struct irq_data *d)
{
        return gic_irq(d) < 32;
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        if (gic_irq_in_rdist(d))        /* SGI+PPI -> SGI_base for this CPU */
                return gic_data_rdist_sgi_base();

        if (d->hwirq <= 1023)           /* SPI -> dist_base */
                return gic_data.dist_base;

        if (d->hwirq >= 8192)
                BUG();                  /* LPI Detected!!! */

        return NULL;
}

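/*
 * Poll the Register Write Pending bit until the GIC has digested the
 * latest register change, giving up (loudly) after roughly one second.
 */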
static void gic_do_wait_for_rwp(void __iomem *base)
{
        u32 count = 1000000;    /* 1s! */

        while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
                count--;
                if (!count) {
                        pr_err_ratelimited("RWP timeout, gone fishing\n");
                        return;
                }
                cpu_relax();
                udelay(1);
        }
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}

/* Low level accessors */
static u64 __maybe_unused gic_read_iar(void)
{
        u64 irqstat;

        asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
        return irqstat;
}

static void __maybe_unused gic_write_pmr(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
}

static void __maybe_unused gic_write_ctlr(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
        isb();
}

static void __maybe_unused gic_write_grpen1(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
        isb();
}

static void __maybe_unused gic_write_sgi1r(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}

static void gic_enable_sre(void)
{
        u64 val;

        asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
        val |= ICC_SRE_EL1_SRE;
        asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
        isb();

        /*
         * Need to check that the SRE bit has actually been set. If
         * not, it means that SRE is disabled at EL2. We're going to
         * die painfully, and there is nothing we can do about it.
         *
         * Kindly inform the luser.
         */
        asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
        if (!(val & ICC_SRE_EL1_SRE))
                pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}

static void gic_enable_redist(void)
{
        void __iomem *rbase;
        u32 count = 1000000;    /* 1s! */
        u32 val;

        rbase = gic_data_rdist_rd_base();

        /* Wake up this CPU redistributor */
        val = readl_relaxed(rbase + GICR_WAKER);
        val &= ~GICR_WAKER_ProcessorSleep;
        writel_relaxed(val, rbase + GICR_WAKER);

        while (readl_relaxed(rbase + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
                count--;
                if (!count) {
                        pr_err_ratelimited("redist didn't wake up...\n");
                        return;
                }
                cpu_relax();
                udelay(1);
        }
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */

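/*
 * Set one bit in a 1-bit-per-interrupt register bank (such as
 * GICD_ISENABLER/GICD_ICENABLER), picking the redistributor for
 * SGIs/PPIs and the distributor for SPIs, then wait for RWP.
 */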
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        void (*rwp_wait)(void);
        void __iomem *base;

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
        rwp_wait();
}

static void gic_mask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_unmask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ISENABLER);
}

static void gic_eoi_irq(struct irq_data *d)
{
        gic_write_eoir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
        unsigned int irq = gic_irq(d);
        void (*rwp_wait)(void);
        void __iomem *base;

        /* Interrupt configuration for SGIs can't be changed */
        if (irq < 16)
                return -EINVAL;

        if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        gic_configure_irq(irq, type, base, rwp_wait);

        return 0;
}

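/*
 * Pack the MPIDR affinity fields into the GICD_IROUTER layout:
 * Aff3 in bits [39:32], Aff2 in [23:16], Aff1 in [15:8], Aff0 in [7:0].
 */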
static u64 gic_mpidr_to_affinity(u64 mpidr)
{
        u64 aff;

        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        return aff;
}

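/*
 * Main interrupt entry point: acknowledge interrupts from ICC_IAR1_EL1
 * until it returns the spurious ID (1023). IDs 16-1019 are PPIs/SPIs
 * dispatched through the irq domain; IDs 0-15 are SGIs, handled as IPIs
 * when SMP is enabled.
 */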
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
        u64 irqnr;

        do {
                irqnr = gic_read_iar();

                if (likely(irqnr > 15 && irqnr < 1020)) {
                        u64 irq = irq_find_mapping(gic_data.domain, irqnr);
                        if (likely(irq)) {
                                handle_IRQ(irq, regs);
                                continue;
                        }

                        WARN_ONCE(true, "Unexpected SPI received!\n");
                        gic_write_eoir(irqnr);
                }
                if (irqnr < 16) {
                        gic_write_eoir(irqnr);
#ifdef CONFIG_SMP
                        handle_IPI(irqnr, regs);
#else
                        WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
                        continue;
                }
        } while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}

static void __init gic_dist_init(void)
{
        unsigned int i;
        u64 affinity;
        void __iomem *base = gic_data.dist_base;

        /* Disable the distributor */
        writel_relaxed(0, base + GICD_CTLR);
        gic_dist_wait_for_rwp();

        gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

        /* Enable distributor with ARE, Group1 */
        writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
                       base + GICD_CTLR);

        /*
         * Set all global interrupts to the boot CPU only. ARE must be
         * enabled.
         */
        affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
        for (i = 32; i < gic_data.irq_nr; i++)
                writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
}

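/*
 * Find this CPU's redistributor: walk each region, stepping by
 * redist_stride (or the architectural 64K frames when no stride is
 * given) until GICR_TYPER.Last is set, and record the frame whose upper
 * 32 bits match this CPU's affinity.
 */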
static int gic_populate_rdist(void)
{
        u64 mpidr = cpu_logical_map(smp_processor_id());
        u64 typer;
        u32 aff;
        int i;

        /*
         * Convert affinity to a 32bit value that can be matched to
         * GICR_TYPER bits [63:32].
         */
        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        for (i = 0; i < gic_data.redist_regions; i++) {
                void __iomem *ptr = gic_data.redist_base[i];
                u32 reg;

                reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
                if (reg != GIC_PIDR2_ARCH_GICv3 &&
                    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
                        pr_warn("No redistributor present @%p\n", ptr);
                        break;
                }

                do {
                        typer = readq_relaxed(ptr + GICR_TYPER);
                        if ((typer >> 32) == aff) {
                                gic_data_rdist_rd_base() = ptr;
                                pr_info("CPU%d: found redistributor %llx @%p\n",
                                        smp_processor_id(),
                                        (unsigned long long)mpidr, ptr);
                                return 0;
                        }

                        if (gic_data.redist_stride) {
                                ptr += gic_data.redist_stride;
                        } else {
                                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
                                if (typer & GICR_TYPER_VLPIS)
                                        ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
                        }
                } while (!(typer & GICR_TYPER_LAST));
        }

        /* We couldn't even deal with ourselves... */
        WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
             smp_processor_id(), (unsigned long long)mpidr);
        return -ENODEV;
}

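/*
 * Per-CPU bring-up: locate and wake this CPU's redistributor, configure
 * the banked SGI/PPI state, then switch the CPU interface to system
 * register access and enable Group1 interrupts.
 */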
static void gic_cpu_init(void)
{
        void __iomem *rbase;

        /* Register ourselves with the rest of the world */
        if (gic_populate_rdist())
                return;

        gic_enable_redist();

        rbase = gic_data_rdist_sgi_base();

        gic_cpu_config(rbase, gic_redist_wait_for_rwp);

        /* Enable system registers */
        gic_enable_sre();

        /* Set priority mask register */
        gic_write_pmr(DEFAULT_PMR_VALUE);

        /* EOI deactivates interrupt too (mode 0) */
        gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);

        /* ... and let's hit the road... */
        gic_write_grpen1(1);
}

#ifdef CONFIG_SMP
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        void __iomem *base;

        if (gic_irq_in_rdist(d))
                base = gic_data_rdist_sgi_base();
        else
                base = gic_data.dist_base;

        return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}

static int gic_secondary_init(struct notifier_block *nfb,
                              unsigned long action, void *hcpu)
{
        if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                gic_cpu_init();
        return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
        .notifier_call = gic_secondary_init,
        .priority = 100,
};

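/*
 * Build the 16-bit target list for a single ICC_SGI1R_EL1 write: gather
 * the CPUs from *mask that share cluster_id (the MPIDR with Aff0 masked
 * out), setting one bit per Aff0 value, and advance *base_cpu past the
 * CPUs that were consumed.
 */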
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                                   u64 cluster_id)
{
        int cpu = *base_cpu;
        u64 mpidr = cpu_logical_map(cpu);
        u16 tlist = 0;

        while (cpu < nr_cpu_ids) {
                /*
                 * If we ever get a cluster of more than 16 CPUs, just
                 * scream and skip that CPU.
                 */
                if (WARN_ON((mpidr & 0xff) >= 16))
                        goto out;

                tlist |= 1 << (mpidr & 0xf);

                cpu = cpumask_next(cpu, mask);
                if (cpu == nr_cpu_ids)
                        goto out;

                mpidr = cpu_logical_map(cpu);

                if (cluster_id != (mpidr & ~0xffUL)) {
                        cpu--;
                        goto out;
                }
        }
out:
        *base_cpu = cpu;
        return tlist;
}

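/*
 * Assemble and fire one SGI register write. In the value built below,
 * Aff3 lands in bits [55:48], Aff2 in [39:32], the SGI number in
 * [27:24], Aff1 in [23:16] and the target list in [15:0].
 */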
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
        u64 val;

        val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 |
               MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 |
               irq << 24 |
               MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 |
               tlist);

        pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
        gic_write_sgi1r(val);
}

static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
        int cpu;

        if (WARN_ON(irq >= 16))
                return;

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
        smp_wmb();

        for_each_cpu_mask(cpu, *mask) {
                u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
                u16 tlist;

                tlist = gic_compute_target_list(&cpu, mask, cluster_id);
                gic_send_sgi(cluster_id, tlist, irq);
        }

        /* Force the above writes to ICC_SGI1R_EL1 to be executed */
        isb();
}

static void gic_smp_init(void)
{
        set_smp_cross_call(gic_raise_softirq);
        register_cpu_notifier(&gic_cpu_notifier);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
        void __iomem *reg;
        int enabled;
        u64 val;

        if (gic_irq_in_rdist(d))
                return -EINVAL;

        /* If interrupt was enabled, disable it first */
        enabled = gic_peek_irq(d, GICD_ISENABLER);
        if (enabled)
                gic_mask_irq(d);

        reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
        val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

        writeq_relaxed(val, reg);

        /*
         * If the interrupt was enabled, enable it again. Otherwise,
         * just wait for the distributor to have digested our changes.
         */
        if (enabled)
                gic_unmask_irq(d);
        else
                gic_dist_wait_for_rwp();

        return IRQ_SET_MASK_OK;
}
#else
#define gic_set_affinity        NULL
#define gic_smp_init()          do { } while (0)
#endif

static struct irq_chip gic_chip = {
        .name                   = "GICv3",
        .irq_mask               = gic_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoi_irq,
        .irq_set_type           = gic_set_type,
        .irq_set_affinity       = gic_set_affinity,
};

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
{
        /* SGIs are private to the core kernel */
        if (hw < 16)
                return -EPERM;
        /* PPIs */
        if (hw < 32) {
                irq_set_percpu_devid(irq);
                irq_set_chip_and_handler(irq, &gic_chip,
                                         handle_percpu_devid_irq);
                set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
        }
        /* SPIs */
        if (hw >= 32 && hw < gic_data.irq_nr) {
                irq_set_chip_and_handler(irq, &gic_chip,
                                         handle_fasteoi_irq);
                set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        }
        irq_set_chip_data(irq, d->host_data);
        return 0;
}

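/*
 * Translate a three-cell device-tree interrupt specifier: cell 0 selects
 * SPI (0) or PPI (1), cell 1 is the index within that class (offset by
 * 32 or 16 respectively to form the hwirq), and cell 2 carries the
 * trigger type.
 */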
static int gic_irq_domain_xlate(struct irq_domain *d,
                                struct device_node *controller,
                                const u32 *intspec, unsigned int intsize,
                                unsigned long *out_hwirq, unsigned int *out_type)
{
        if (d->of_node != controller)
                return -EINVAL;
        if (intsize < 3)
                return -EINVAL;

        switch (intspec[0]) {
        case 0:                 /* SPI */
                *out_hwirq = intspec[1] + 32;
                break;
        case 1:                 /* PPI */
                *out_hwirq = intspec[1] + 16;
                break;
        default:
                return -EINVAL;
        }

        *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
        return 0;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
        .map = gic_irq_domain_map,
        .xlate = gic_irq_domain_xlate,
};

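/*
 * Device-tree probe. reg entry 0 maps the distributor; entries 1..N map
 * the redistributor regions, with the optional "#redistributor-regions"
 * and "redistributor-stride" properties describing their layout. A
 * minimal, purely illustrative node (addresses and sizes are made up):
 *
 *      gic: interrupt-controller@2f000000 {
 *              compatible = "arm,gic-v3";
 *              interrupt-controller;
 *              #interrupt-cells = <3>;
 *              reg = <0x2f000000 0x10000>,     // distributor (GICD)
 *                    <0x2f100000 0x200000>;    // redistributor region 0 (GICR)
 *      };
 */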
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
        void __iomem *dist_base;
        void __iomem **redist_base;
        u64 redist_stride;
        u32 redist_regions;
        u32 reg;
        int gic_irqs;
        int err;
        int i;

        dist_base = of_iomap(node, 0);
        if (!dist_base) {
                pr_err("%s: unable to map gic dist registers\n",
                       node->full_name);
                return -ENXIO;
        }

        reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
        if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
                pr_err("%s: no distributor detected, giving up\n",
                       node->full_name);
                err = -ENODEV;
                goto out_unmap_dist;
        }

        if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
                redist_regions = 1;

        redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
        if (!redist_base) {
                err = -ENOMEM;
                goto out_unmap_dist;
        }

        for (i = 0; i < redist_regions; i++) {
                redist_base[i] = of_iomap(node, 1 + i);
                if (!redist_base[i]) {
                        pr_err("%s: couldn't map region %d\n",
                               node->full_name, i);
                        err = -ENODEV;
                        goto out_unmap_rdist;
                }
        }

        if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
                redist_stride = 0;

        gic_data.dist_base = dist_base;
        gic_data.redist_base = redist_base;
        gic_data.redist_regions = redist_regions;
        gic_data.redist_stride = redist_stride;

        /*
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
         */
        gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
        gic_irqs = (gic_irqs + 1) * 32;
        if (gic_irqs > 1020)
                gic_irqs = 1020;
        gic_data.irq_nr = gic_irqs;

        gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
                                              &gic_data);
        gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));

        if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
                err = -ENOMEM;
                goto out_free;
        }

        set_handle_irq(gic_handle_irq);

        gic_smp_init();
        gic_dist_init();
        gic_cpu_init();

        return 0;

out_free:
        if (gic_data.domain)
                irq_domain_remove(gic_data.domain);
        free_percpu(gic_data.rdist);
out_unmap_rdist:
        for (i = 0; i < redist_regions; i++)
                if (redist_base[i])
                        iounmap(redist_base[i]);
        kfree(redist_base);
out_unmap_dist:
        iounmap(dist_base);
        return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);