irq-gic-v3.c
/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"
#include "irqchip.h"
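
/*
 * Bookkeeping for one redistributor region described by the device
 * tree: the kernel mapping of the region and its physical base.
 */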
struct redist_region {
        void __iomem            *redist_base;
        phys_addr_t             phys_base;
};

struct gic_chip_data {
        void __iomem            *dist_base;
        struct redist_region    *redist_regions;
        struct rdists           rdists;
        struct irq_domain       *domain;
        u64                     redist_stride;
        u32                     nr_redist_regions;
        unsigned int            irq_nr;
};
static struct gic_chip_data gic_data __read_mostly;

#define gic_data_rdist()                (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()        (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()       (gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE       0xf0
static inline unsigned int gic_irq(struct irq_data *d)
{
        return d->hwirq;
}
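
/*
 * hwirq ranges: SGIs are 0-15, PPIs are 16-31, SPIs are 32-1019 and
 * LPIs start at 8192. SGIs and PPIs live in the per-CPU redistributor,
 * everything else is handled by the distributor.
 */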
static inline int gic_irq_in_rdist(struct irq_data *d)
{
        return gic_irq(d) < 32;
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        if (gic_irq_in_rdist(d))        /* SGI+PPI -> SGI_base for this CPU */
                return gic_data_rdist_sgi_base();

        if (d->hwirq <= 1023)           /* SPI -> dist_base */
                return gic_data.dist_base;

        return NULL;
}
static void gic_do_wait_for_rwp(void __iomem *base)
{
        u32 count = 1000000;    /* 1s! */

        while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
                count--;
                if (!count) {
                        pr_err_ratelimited("RWP timeout, gone fishing\n");
                        return;
                }
                cpu_relax();
                udelay(1);
        }
}
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}
/* Low level accessors */
static u64 __maybe_unused gic_read_iar(void)
{
        u64 irqstat;

        asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
        return irqstat;
}

static void __maybe_unused gic_write_pmr(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
}

static void __maybe_unused gic_write_ctlr(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
        isb();
}

static void __maybe_unused gic_write_grpen1(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
        isb();
}

static void __maybe_unused gic_write_sgi1r(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}
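
/*
 * Enable system register access to the CPU interface (the ICC_*_EL1
 * registers) by setting ICC_SRE_EL1.SRE. This driver only talks to
 * the CPU interface through system registers, so SRE must stick.
 */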
static void gic_enable_sre(void)
{
        u64 val;

        asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
        val |= ICC_SRE_EL1_SRE;
        asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
        isb();

        /*
         * Need to check that the SRE bit has actually been set. If
         * not, it means that SRE is disabled at EL2. We're going to
         * die painfully, and there is nothing we can do about it.
         *
         * Kindly inform the luser.
         */
        asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
        if (!(val & ICC_SRE_EL1_SRE))
                pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}
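
/*
 * Wake up (or put to sleep) this CPU's redistributor via the
 * GICR_WAKER handshake: flip ProcessorSleep, then poll ChildrenAsleep
 * until the redistributor reports the requested state.
 */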
static void gic_enable_redist(bool enable)
{
        void __iomem *rbase;
        u32 count = 1000000;    /* 1s! */
        u32 val;

        rbase = gic_data_rdist_rd_base();

        val = readl_relaxed(rbase + GICR_WAKER);
        if (enable)
                /* Wake up this CPU redistributor */
                val &= ~GICR_WAKER_ProcessorSleep;
        else
                val |= GICR_WAKER_ProcessorSleep;
        writel_relaxed(val, rbase + GICR_WAKER);

        if (!enable) {          /* Check that GICR_WAKER is writeable */
                val = readl_relaxed(rbase + GICR_WAKER);
                if (!(val & GICR_WAKER_ProcessorSleep))
                        return; /* No PM support in this redistributor */
        }

        while (count--) {
                val = readl_relaxed(rbase + GICR_WAKER);
                if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
                        break;
                cpu_relax();
                udelay(1);
        }

        if (!count)
                pr_err_ratelimited("redistributor failed to %s...\n",
                                   enable ? "wakeup" : "sleep");
}
/*
 * Routines to disable, enable, EOI and route interrupts
 */
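
/*
 * Set one bit in a banked 32-bits-per-register array (GICD_ISENABLER,
 * GICD_ICENABLER, ...), in either the redistributor SGI frame or the
 * distributor, and wait for the write to take effect.
 */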
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        void (*rwp_wait)(void);
        void __iomem *base;

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
        rwp_wait();
}

static void gic_mask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_unmask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ISENABLER);
}

static void gic_eoi_irq(struct irq_data *d)
{
        gic_write_eoir(gic_irq(d));
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
        unsigned int irq = gic_irq(d);
        void (*rwp_wait)(void);
        void __iomem *base;

        /* Interrupt configuration for SGIs can't be changed */
        if (irq < 16)
                return -EINVAL;

        if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        gic_configure_irq(irq, type, base, rwp_wait);

        return 0;
}
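
/*
 * Pack an MPIDR into the Aff3.Aff2.Aff1.Aff0 layout used by
 * GICD_IROUTER: Aff3 in bits [39:32], the rest in the low 24 bits.
 */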
static u64 gic_mpidr_to_affinity(u64 mpidr)
{
        u64 aff;

        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        return aff;
}
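
/*
 * Our entry point from the low-level exception code. Keep acking
 * interrupts from ICC_IAR1_EL1 until we read back the spurious value:
 * SPIs and LPIs are fed into the irq domain, SGIs go to handle_IPI().
 */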
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
        u64 irqnr;

        do {
                irqnr = gic_read_iar();

                if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
                        int err;

                        err = handle_domain_irq(gic_data.domain, irqnr, regs);
                        if (err) {
                                WARN_ONCE(true, "Unexpected interrupt received!\n");
                                gic_write_eoir(irqnr);
                        }
                        continue;
                }
                if (irqnr < 16) {
                        gic_write_eoir(irqnr);
#ifdef CONFIG_SMP
                        handle_IPI(irqnr, regs);
#else
                        WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
                        continue;
                }
        } while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}
static void __init gic_dist_init(void)
{
        unsigned int i;
        u64 affinity;
        void __iomem *base = gic_data.dist_base;

        /* Disable the distributor */
        writel_relaxed(0, base + GICD_CTLR);
        gic_dist_wait_for_rwp();

        gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

        /* Enable distributor with ARE, Group1 */
        writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
                       base + GICD_CTLR);

        /*
         * Set all global interrupts to the boot CPU only. ARE must be
         * enabled.
         */
        affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
        for (i = 32; i < gic_data.irq_nr; i++)
                writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
}
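
/*
 * Walk all redistributor regions, stepping over each redistributor
 * frame, until we find the one whose GICR_TYPER affinity (bits
 * [63:32]) matches the MPIDR of the CPU we're running on, and record
 * its base addresses in the per-CPU rdist data.
 */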
static int gic_populate_rdist(void)
{
        u64 mpidr = cpu_logical_map(smp_processor_id());
        u64 typer;
        u32 aff;
        int i;

        /*
         * Convert affinity to a 32bit value that can be matched to
         * GICR_TYPER bits [63:32].
         */
        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        for (i = 0; i < gic_data.nr_redist_regions; i++) {
                void __iomem *ptr = gic_data.redist_regions[i].redist_base;
                u32 reg;

                reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
                if (reg != GIC_PIDR2_ARCH_GICv3 &&
                    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
                        pr_warn("No redistributor present @%p\n", ptr);
                        break;
                }

                do {
                        typer = readq_relaxed(ptr + GICR_TYPER);
                        if ((typer >> 32) == aff) {
                                u64 offset = ptr - gic_data.redist_regions[i].redist_base;

                                gic_data_rdist_rd_base() = ptr;
                                gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
                                pr_info("CPU%d: found redistributor %llx region %d:%pa\n",
                                        smp_processor_id(),
                                        (unsigned long long)mpidr,
                                        i, &gic_data_rdist()->phys_base);
                                return 0;
                        }

                        if (gic_data.redist_stride) {
                                ptr += gic_data.redist_stride;
                        } else {
                                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
                                if (typer & GICR_TYPER_VLPIS)
                                        ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
                        }
                } while (!(typer & GICR_TYPER_LAST));
        }

        /* We couldn't even deal with ourselves... */
        WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
             smp_processor_id(), (unsigned long long)mpidr);
        return -ENODEV;
}
static void gic_cpu_sys_reg_init(void)
{
        /* Enable system registers */
        gic_enable_sre();

        /* Set priority mask register */
        gic_write_pmr(DEFAULT_PMR_VALUE);

        /* EOI deactivates interrupt too (mode 0) */
        gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);

        /* ... and let's hit the road... */
        gic_write_grpen1(1);
}

static int gic_dist_supports_lpis(void)
{
        return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
}
static void gic_cpu_init(void)
{
        void __iomem *rbase;

        /* Register ourselves with the rest of the world */
        if (gic_populate_rdist())
                return;

        gic_enable_redist(true);

        rbase = gic_data_rdist_sgi_base();

        gic_cpu_config(rbase, gic_redist_wait_for_rwp);

        /* Give LPIs a spin */
        if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
                its_cpu_init();

        /* initialise system registers */
        gic_cpu_sys_reg_init();
}
#ifdef CONFIG_SMP
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        void __iomem *base;

        if (gic_irq_in_rdist(d))
                base = gic_data_rdist_sgi_base();
        else
                base = gic_data.dist_base;

        return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}

static int gic_secondary_init(struct notifier_block *nfb,
                              unsigned long action, void *hcpu)
{
        if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                gic_cpu_init();
        return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
        .notifier_call = gic_secondary_init,
        .priority = 100,
};
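
/*
 * Starting at *base_cpu, build the 16-bit SGI target list for the
 * CPUs in @mask that share @cluster_id (everything above Aff0), and
 * advance *base_cpu to the last CPU consumed so the caller can carry
 * on with the next cluster.
 */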
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                                   u64 cluster_id)
{
        int cpu = *base_cpu;
        u64 mpidr = cpu_logical_map(cpu);
        u16 tlist = 0;

        while (cpu < nr_cpu_ids) {
                /*
                 * If we ever get a cluster of more than 16 CPUs, just
                 * scream and skip that CPU.
                 */
                if (WARN_ON((mpidr & 0xff) >= 16))
                        goto out;

                tlist |= 1 << (mpidr & 0xf);

                cpu = cpumask_next(cpu, mask);
                if (cpu == nr_cpu_ids)
                        goto out;

                mpidr = cpu_logical_map(cpu);

                if (cluster_id != (mpidr & ~0xffUL)) {
                        cpu--;
                        goto out;
                }
        }
out:
        *base_cpu = cpu;
        return tlist;
}
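
/*
 * Assemble and write an ICC_SGI1R_EL1 value: Aff3 in bits [55:48],
 * Aff2 in [39:32], the SGI number in [27:24], Aff1 in [23:16] and the
 * target list in [15:0].
 */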
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
        u64 val;

        val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 |
               MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 |
               irq << 24                                 |
               MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 |
               tlist);

        pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
        gic_write_sgi1r(val);
}
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
        int cpu;

        if (WARN_ON(irq >= 16))
                return;

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
        smp_wmb();

        for_each_cpu_mask(cpu, *mask) {
                u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
                u16 tlist;

                tlist = gic_compute_target_list(&cpu, mask, cluster_id);
                gic_send_sgi(cluster_id, tlist, irq);
        }

        /* Force the above writes to ICC_SGI1R_EL1 to be executed */
        isb();
}

static void gic_smp_init(void)
{
        set_smp_cross_call(gic_raise_softirq);
        register_cpu_notifier(&gic_cpu_notifier);
}
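
/*
 * Route an SPI to a single CPU by rewriting its GICD_IROUTER entry,
 * masking the interrupt around the update if it was enabled. SGIs and
 * PPIs are per-CPU by construction and cannot be moved.
 */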
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
        void __iomem *reg;
        int enabled;
        u64 val;

        if (gic_irq_in_rdist(d))
                return -EINVAL;

        /* If interrupt was enabled, disable it first */
        enabled = gic_peek_irq(d, GICD_ISENABLER);
        if (enabled)
                gic_mask_irq(d);

        reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
        val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

        writeq_relaxed(val, reg);

        /*
         * If the interrupt was enabled, enable it again. Otherwise,
         * just wait for the distributor to have digested our changes.
         */
        if (enabled)
                gic_unmask_irq(d);
        else
                gic_dist_wait_for_rwp();

        return IRQ_SET_MASK_OK;
}
#else
#define gic_set_affinity        NULL
#define gic_smp_init()          do { } while(0)
#endif
#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
                               unsigned long cmd, void *v)
{
        if (cmd == CPU_PM_EXIT) {
                gic_enable_redist(true);
                gic_cpu_sys_reg_init();
        } else if (cmd == CPU_PM_ENTER) {
                gic_write_grpen1(0);
                gic_enable_redist(false);
        }
        return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
        .notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
        cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
static struct irq_chip gic_chip = {
        .name                   = "GICv3",
        .irq_mask               = gic_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoi_irq,
        .irq_set_type           = gic_set_type,
        .irq_set_affinity       = gic_set_affinity,
};

#define GIC_ID_NR               (1U << gic_data.rdists.id_bits)
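
/*
 * Bind a Linux virq to a GIC hwirq: PPIs become per-CPU interrupts,
 * SPIs and LPIs use the fasteoi flow. SGIs and the hole between the
 * last SPI and the first LPI (8192) are off limits.
 */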
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
{
        /* SGIs are private to the core kernel */
        if (hw < 16)
                return -EPERM;
        /* Nothing here */
        if (hw >= gic_data.irq_nr && hw < 8192)
                return -EPERM;
        /* Off limits */
        if (hw >= GIC_ID_NR)
                return -EPERM;

        /* PPIs */
        if (hw < 32) {
                irq_set_percpu_devid(irq);
                irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
                                    handle_percpu_devid_irq, NULL, NULL);
                set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
        }
        /* SPIs */
        if (hw >= 32 && hw < gic_data.irq_nr) {
                irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
                set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        }
        /* LPIs */
        if (hw >= 8192 && hw < GIC_ID_NR) {
                if (!gic_dist_supports_lpis())
                        return -EPERM;
                irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
                set_irq_flags(irq, IRQF_VALID);
        }

        return 0;
}
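
/*
 * Decode the 3-cell DT interrupt specifier: cell 0 selects SPI (0),
 * PPI (1) or LPI, cell 1 is the interrupt number relative to that
 * space, and cell 2 carries the trigger type flags.
 */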
static int gic_irq_domain_xlate(struct irq_domain *d,
                                struct device_node *controller,
                                const u32 *intspec, unsigned int intsize,
                                unsigned long *out_hwirq, unsigned int *out_type)
{
        if (d->of_node != controller)
                return -EINVAL;
        if (intsize < 3)
                return -EINVAL;

        switch(intspec[0]) {
        case 0:                 /* SPI */
                *out_hwirq = intspec[1] + 32;
                break;
        case 1:                 /* PPI */
                *out_hwirq = intspec[1] + 16;
                break;
        case GIC_IRQ_TYPE_LPI:  /* LPI */
                *out_hwirq = intspec[1];
                break;
        default:
                return -EINVAL;
        }

        *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
        return 0;
}
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
{
        int i, ret;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        struct of_phandle_args *irq_data = arg;

        ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
                                   irq_data->args_count, &hwirq, &type);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++)
                gic_irq_domain_map(domain, virq + i, hwirq + i);

        return 0;
}

static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs)
{
        int i;

        for (i = 0; i < nr_irqs; i++) {
                struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

                irq_set_handler(virq + i, NULL);
                irq_domain_reset_irq_data(d);
        }
}

static const struct irq_domain_ops gic_irq_domain_ops = {
        .xlate = gic_irq_domain_xlate,
        .alloc = gic_irq_domain_alloc,
        .free = gic_irq_domain_free,
};
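
/*
 * Probe an "arm,gic-v3" node: map the distributor and all
 * redistributor regions, size the interrupt space from GICD_TYPER,
 * create the irq domain, then bring up the distributor, this CPU's
 * interface and the SMP/PM hooks.
 */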
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
        void __iomem *dist_base;
        struct redist_region *rdist_regs;
        u64 redist_stride;
        u32 nr_redist_regions;
        u32 typer;
        u32 reg;
        int gic_irqs;
        int err;
        int i;

        dist_base = of_iomap(node, 0);
        if (!dist_base) {
                pr_err("%s: unable to map gic dist registers\n",
                       node->full_name);
                return -ENXIO;
        }

        reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
        if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
                pr_err("%s: no distributor detected, giving up\n",
                       node->full_name);
                err = -ENODEV;
                goto out_unmap_dist;
        }

        if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
                nr_redist_regions = 1;

        rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
        if (!rdist_regs) {
                err = -ENOMEM;
                goto out_unmap_dist;
        }

        for (i = 0; i < nr_redist_regions; i++) {
                struct resource res;
                int ret;

                ret = of_address_to_resource(node, 1 + i, &res);
                rdist_regs[i].redist_base = of_iomap(node, 1 + i);
                if (ret || !rdist_regs[i].redist_base) {
                        pr_err("%s: couldn't map region %d\n",
                               node->full_name, i);
                        err = -ENODEV;
                        goto out_unmap_rdist;
                }
                rdist_regs[i].phys_base = res.start;
        }

        if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
                redist_stride = 0;

        gic_data.dist_base = dist_base;
        gic_data.redist_regions = rdist_regs;
        gic_data.nr_redist_regions = nr_redist_regions;
        gic_data.redist_stride = redist_stride;

        /*
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
         */
        typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
        gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
        gic_irqs = GICD_TYPER_IRQS(typer);
        if (gic_irqs > 1020)
                gic_irqs = 1020;
        gic_data.irq_nr = gic_irqs;

        gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
                                              &gic_data);
        gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));

        if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
                err = -ENOMEM;
                goto out_free;
        }

        set_handle_irq(gic_handle_irq);

        if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
                its_init(node, &gic_data.rdists, gic_data.domain);

        gic_smp_init();
        gic_dist_init();
        gic_cpu_init();
        gic_cpu_pm_init();

        return 0;

out_free:
        if (gic_data.domain)
                irq_domain_remove(gic_data.domain);
        free_percpu(gic_data.rdists.rdist);
out_unmap_rdist:
        for (i = 0; i < nr_redist_regions; i++)
                if (rdist_regs[i].redist_base)
                        iounmap(rdist_regs[i].redist_base);
        kfree(rdist_regs);
out_unmap_dist:
        iounmap(dist_base);
        return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);