irq-gic-v3.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873
  1. /*
  2. * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
  3. * Author: Marc Zyngier <marc.zyngier@arm.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  16. */
  17. #include <linux/cpu.h>
  18. #include <linux/cpu_pm.h>
  19. #include <linux/delay.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/of.h>
  22. #include <linux/of_address.h>
  23. #include <linux/of_irq.h>
  24. #include <linux/percpu.h>
  25. #include <linux/slab.h>
  26. #include <linux/irqchip/arm-gic-v3.h>
  27. #include <asm/cputype.h>
  28. #include <asm/exception.h>
  29. #include <asm/smp_plat.h>
  30. #include "irq-gic-common.h"
  31. #include "irqchip.h"
/*
 * One redistributor region from the DT "reg" property: its ioremapped
 * base and the physical address it was mapped from.
 */
struct redist_region {
	void __iomem	*redist_base;	/* ioremapped base of the region */
	phys_addr_t	phys_base;	/* physical base (kept for reporting) */
};
/* State for the (single) GICv3 instance this driver manages. */
struct gic_chip_data {
	void __iomem		*dist_base;		/* distributor frame */
	struct redist_region	*redist_regions;	/* array of mapped rdist regions */
	struct rdists		rdists;			/* per-CPU redistributor bookkeeping */
	struct irq_domain	*domain;
	u64			redist_stride;		/* 0 = derive stride from GICR_TYPER */
	u32			nr_redist_regions;
	unsigned int		irq_nr;			/* number of SGI+PPI+SPI interrupts */
};
/* The one system-wide GICv3 instance. */
static struct gic_chip_data gic_data __read_mostly;

/*
 * Per-CPU redistributor accessors: the rdist entry for this CPU, its
 * RD_base control frame, and the SGI frame located 64K above RD_base.
 */
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0
/* Hardware interrupt number backing this irq_data. */
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
/*
 * True for SGIs (0-15) and PPIs (16-31), which are banked per CPU and
 * controlled through the redistributor's SGI frame, not the distributor.
 */
static inline int gic_irq_in_rdist(struct irq_data *d)
{
	return gic_irq(d) < 32;
}
  59. static inline void __iomem *gic_dist_base(struct irq_data *d)
  60. {
  61. if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */
  62. return gic_data_rdist_sgi_base();
  63. if (d->hwirq <= 1023) /* SPI -> dist_base */
  64. return gic_data.dist_base;
  65. return NULL;
  66. }
  67. static void gic_do_wait_for_rwp(void __iomem *base)
  68. {
  69. u32 count = 1000000; /* 1s! */
  70. while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
  71. count--;
  72. if (!count) {
  73. pr_err_ratelimited("RWP timeout, gone fishing\n");
  74. return;
  75. }
  76. cpu_relax();
  77. udelay(1);
  78. };
  79. }
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change (this CPU's RD frame) */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}
/* Low level accessors for the ICC_* CPU interface system registers */

/* Acknowledge the highest-priority pending group-1 interrupt. */
static u64 __maybe_unused gic_read_iar(void)
{
	u64 irqstat;

	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	return irqstat;
}

/* Set the priority mask: only interrupts of higher priority are taken. */
static void __maybe_unused gic_write_pmr(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
}

/* Write the CPU interface control register; isb() makes it take effect. */
static void __maybe_unused gic_write_ctlr(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
	isb();
}

/* Enable/disable group-1 interrupt delivery to this CPU. */
static void __maybe_unused gic_write_grpen1(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
	isb();
}

/* Fire an SGI as encoded in @val (see gic_send_sgi for the layout). */
static void __maybe_unused gic_write_sgi1r(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}
/*
 * Enable System Register Enable (SRE) so the ICC_* sysreg interface can
 * be used instead of the memory-mapped one. Read back to verify: EL2
 * may have SRE access disabled, in which case we can only warn.
 */
static void gic_enable_sre(void)
{
	u64 val;

	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	val |= ICC_SRE_EL1_SRE;
	asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
	isb();

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	if (!(val & ICC_SRE_EL1_SRE))
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}
  133. static void gic_enable_redist(bool enable)
  134. {
  135. void __iomem *rbase;
  136. u32 count = 1000000; /* 1s! */
  137. u32 val;
  138. rbase = gic_data_rdist_rd_base();
  139. val = readl_relaxed(rbase + GICR_WAKER);
  140. if (enable)
  141. /* Wake up this CPU redistributor */
  142. val &= ~GICR_WAKER_ProcessorSleep;
  143. else
  144. val |= GICR_WAKER_ProcessorSleep;
  145. writel_relaxed(val, rbase + GICR_WAKER);
  146. if (!enable) { /* Check that GICR_WAKER is writeable */
  147. val = readl_relaxed(rbase + GICR_WAKER);
  148. if (!(val & GICR_WAKER_ProcessorSleep))
  149. return; /* No PM support in this redistributor */
  150. }
  151. while (count--) {
  152. val = readl_relaxed(rbase + GICR_WAKER);
  153. if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
  154. break;
  155. cpu_relax();
  156. udelay(1);
  157. };
  158. if (!count)
  159. pr_err_ratelimited("redistributor failed to %s...\n",
  160. enable ? "wakeup" : "sleep");
  161. }
  162. /*
  163. * Routines to disable, enable, EOI and route interrupts
  164. */
  165. static int gic_peek_irq(struct irq_data *d, u32 offset)
  166. {
  167. u32 mask = 1 << (gic_irq(d) % 32);
  168. void __iomem *base;
  169. if (gic_irq_in_rdist(d))
  170. base = gic_data_rdist_sgi_base();
  171. else
  172. base = gic_data.dist_base;
  173. return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
  174. }
/*
 * Write the single bit for @d into the bitmapped register bank at
 * @offset (set-enable/clear-enable/set-pending/...), then wait for the
 * matching RWP bit so the change is guaranteed to have taken effect.
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void (*rwp_wait)(void);
	void __iomem *base;

	if (gic_irq_in_rdist(d)) {
		/* SGI/PPI: per-CPU SGI frame, redistributor RWP */
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		/* SPI: distributor frame, distributor RWP */
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
	rwp_wait();
}
/* Mask @d by writing its bit to the clear-enable register bank. */
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}

/* Unmask @d by writing its bit to the set-enable register bank. */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}
/*
 * irq_chip ->irq_set_irqchip_state: force the pending/active/masked
 * state of @d by poking the matching set/clear register bank.
 * Only SGI/PPI/SPI are supported (LPIs have hwirq >= 8192 and fail the
 * range check). Returns 0 or -EINVAL.
 */
static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= gic_data.irq_nr)	/* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;
	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;
	case IRQCHIP_STATE_MASKED:
		/* "masked" true means enable bit cleared, hence the swap */
		reg = val ? GICD_ICENABLER : GICD_ISENABLER;
		break;
	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}
/*
 * irq_chip ->irq_get_irqchip_state: read back the pending/active/masked
 * state of @d into *@val. Only SGI/PPI/SPI are supported.
 * Returns 0 or -EINVAL.
 */
static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= gic_data.irq_nr)	/* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;
	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;
	case IRQCHIP_STATE_MASKED:
		/* masked == enable bit not set */
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* Signal end-of-interrupt for @d via ICC_EOIR1_EL1. */
static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}
/*
 * irq_chip ->irq_set_type: program the trigger type of @d.
 * SGIs (hwirq < 16) are always edge and cannot be reconfigured; SPIs
 * only accept level-high or rising-edge. Returns 0 or a negative errno
 * from gic_configure_irq().
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	return gic_configure_irq(irq, type, base, rwp_wait);
}
  265. static u64 gic_mpidr_to_affinity(u64 mpidr)
  266. {
  267. u64 aff;
  268. aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
  269. MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
  270. MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
  271. MPIDR_AFFINITY_LEVEL(mpidr, 0));
  272. return aff;
  273. }
/*
 * Top-level IRQ entry point. Repeatedly acknowledge interrupts from
 * ICC_IAR1_EL1 and dispatch them until the spurious ID is returned:
 *  - PPI/SPI (16..1019) and LPI (>= 8192): route through the irq domain;
 *    on lookup failure, warn and EOI so the interrupt isn't left active.
 *  - SGI (0..15): EOI immediately, then hand to the IPI layer (SMP only).
 */
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u64 irqnr;

	do {
		irqnr = gic_read_iar();

		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
			int err;

			err = handle_domain_irq(gic_data.domain, irqnr, regs);
			if (err) {
				WARN_ONCE(true, "Unexpected interrupt received!\n");
				/* EOI so the unclaimed interrupt is retired */
				gic_write_eoir(irqnr);
			}
			continue;
		}
		if (irqnr < 16) {
			gic_write_eoir(irqnr);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#else
			WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
			continue;
		}
	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}
/*
 * One-time distributor bring-up: disable it, apply the common default
 * configuration, re-enable with affinity routing (ARE) and Group1, and
 * route every SPI to the boot CPU.
 */
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/* Shared defaults: trigger config, priorities, disable all SPIs */
	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
		       base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
}
/*
 * Find the redistributor belonging to the calling CPU by walking every
 * mapped region and matching this CPU's affinity against GICR_TYPER
 * bits [63:32]. On success, record the RD frame and physical base in
 * the per-CPU rdist and return 0; return -ENODEV if no match is found.
 */
static int gic_populate_rdist(void)
{
	u64 mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;
	int i;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u32 reg;

		/* Sanity check: the region must contain a v3/v4 rdist */
		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		/* Walk the rdists in this region until TYPER.Last is set */
		do {
			typer = readq_relaxed(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				u64 offset = ptr - gic_data.redist_regions[i].redist_base;

				gic_data_rdist_rd_base() = ptr;
				gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
				pr_info("CPU%d: found redistributor %llx region %d:%pa\n",
					smp_processor_id(),
					(unsigned long long)mpidr,
					i, &gic_data_rdist()->phys_base);
				return 0;
			}

			if (gic_data.redist_stride) {
				/* DT-provided stride overrides the layout */
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
	     smp_processor_id(), (unsigned long long)mpidr);
	return -ENODEV;
}
/*
 * Program this CPU's ICC_* system registers: enable sysreg access,
 * open the priority mask, select EOImode 0 (EOI also deactivates),
 * and finally enable Group1 interrupt delivery.
 */
static void gic_cpu_sys_reg_init(void)
{
	/* Enable system registers */
	gic_enable_sre();

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	/* EOI deactivates interrupt too (mode 0) */
	gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);

	/* ... and let's hit the road... */
	gic_write_grpen1(1);
}

/* True if the distributor advertises LPI support in GICD_TYPER. */
static int gic_dist_supports_lpis(void)
{
	return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
}
/*
 * Per-CPU bring-up: locate this CPU's redistributor, wake it, apply the
 * common SGI/PPI configuration, initialise the ITS side if LPIs are
 * available, then program the CPU interface system registers.
 * Silently bails out if no redistributor was found for this CPU.
 */
static void gic_cpu_init(void)
{
	void __iomem *rbase;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	rbase = gic_data_rdist_sgi_base();

	gic_cpu_config(rbase, gic_redist_wait_for_rwp);

	/* Give LPIs a spin */
	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_cpu_init();

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}
  398. #ifdef CONFIG_SMP
/*
 * CPU hotplug callback: run the per-CPU GIC bring-up on each secondary
 * CPU as it starts (including resume from freeze).
 */
static int gic_secondary_init(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init();
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
/*
 * Build the 16-bit SGI target list for all CPUs in @mask that share
 * @cluster_id, starting at *@base_cpu. Each bit corresponds to Aff0 of
 * a CPU in that cluster. *@base_cpu is advanced to the last CPU
 * consumed, so the caller's for_each_cpu() continues with the next
 * cluster on its following iteration.
 */
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   u64 cluster_id)
{
	int cpu = *base_cpu;
	u64 mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		/*
		 * If we ever get a cluster of more than 16 CPUs, just
		 * scream and skip that CPU.
		 */
		if (WARN_ON((mpidr & 0xff) >= 16))
			goto out;

		tlist |= 1 << (mpidr & 0xf);

		cpu = cpumask_next(cpu, mask);
		if (cpu >= nr_cpu_ids)
			goto out;

		mpidr = cpu_logical_map(cpu);
		if (cluster_id != (mpidr & ~0xffUL)) {
			/*
			 * Crossed into a different cluster: step back one
			 * so the caller's next cpumask_next() lands on
			 * this CPU again.
			 */
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
/* Extract one affinity level from @cluster_id, shifted into its SGI1R field. */
#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)

/*
 * Encode and fire one SGI: @irq to the CPUs in @tlist within the
 * cluster identified by @cluster_id (Aff3/Aff2/Aff1).
 */
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}
/*
 * Send IPI @irq (an SGI, so < 16) to every CPU in @mask, one SGI1R
 * write per cluster. The target-list register only addresses CPUs
 * within a single cluster, hence the per-cluster grouping.
 */
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;

	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	smp_wmb();

	for_each_cpu(cpu, mask) {
		u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		u16 tlist;

		/* Consumes all mask CPUs of this cluster and advances cpu */
		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, irq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}
/* Hook up IPI delivery and the secondary-CPU bring-up notifier. */
static void gic_smp_init(void)
{
	set_smp_cross_call(gic_raise_softirq);
	register_cpu_notifier(&gic_cpu_notifier);
}
/*
 * irq_chip ->irq_set_affinity: route SPI @d to one online CPU from
 * @mask_val by writing its affinity into GICD_IROUTER. Not valid for
 * per-CPU interrupts (SGI/PPI). The interrupt is masked around the
 * route change if it was enabled.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	void __iomem *reg;
	int enabled;
	u64 val;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	writeq_relaxed(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	return IRQ_SET_MASK_OK;
}
  505. #else
  506. #define gic_set_affinity NULL
  507. #define gic_smp_init() do { } while(0)
  508. #endif
  509. #ifdef CONFIG_CPU_PM
/*
 * CPU PM callback: quiesce the GIC CPU interface and redistributor on
 * power-down entry, and restore them (redistributor wake + sysreg
 * reprogramming) on exit.
 */
static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		gic_enable_redist(true);
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER) {
		/* Disable Group1 delivery before putting the rdist to sleep */
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

/* Register the CPU PM notifier above. */
static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}
  529. #else
  530. static inline void gic_cpu_pm_init(void) { }
  531. #endif /* CONFIG_CPU_PM */
/* irq_chip callbacks shared by SGI/PPI, SPI and LPI mappings. */
static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
};
/* Total interrupt ID space implemented, from GICD_TYPER.IDbits. */
#define GIC_ID_NR		(1U << gic_data.rdists.id_bits)

/*
 * irq domain ->map: bind virq @irq to hardware interrupt @hw, choosing
 * the flow handler by range: PPI (percpu_devid), SPI (fasteoi), LPI
 * (fasteoi, only if the distributor supports LPIs). SGIs and hwirqs
 * outside the implemented ranges are refused with -EPERM.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	/* SGIs are private to the core kernel */
	if (hw < 16)
		return -EPERM;
	/* Nothing here (hole between SPIs and LPIs) */
	if (hw >= gic_data.irq_nr && hw < 8192)
		return -EPERM;
	/* Off limits */
	if (hw >= GIC_ID_NR)
		return -EPERM;

	/* PPIs */
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	}
	/* SPIs */
	if (hw >= 32 && hw < gic_data.irq_nr) {
		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	/* LPIs */
	if (hw >= 8192 && hw < GIC_ID_NR) {
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID);
	}

	return 0;
}
/*
 * irq domain ->xlate: decode a 3-cell DT interrupt specifier.
 * Cell 0 selects the class (0 = SPI, offset 32; 1 = PPI, offset 16;
 * GIC_IRQ_TYPE_LPI = LPI, no offset), cell 1 the index within that
 * class, and cell 2 the trigger type.
 */
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	switch(intspec[0]) {
	case 0:			/* SPI */
		*out_hwirq = intspec[1] + 32;
		break;
	case 1:			/* PPI */
		*out_hwirq = intspec[1] + 16;
		break;
	case GIC_IRQ_TYPE_LPI:	/* LPI */
		*out_hwirq = intspec[1];
		break;
	default:
		return -EINVAL;
	}

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
  603. static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
  604. unsigned int nr_irqs, void *arg)
  605. {
  606. int i, ret;
  607. irq_hw_number_t hwirq;
  608. unsigned int type = IRQ_TYPE_NONE;
  609. struct of_phandle_args *irq_data = arg;
  610. ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
  611. irq_data->args_count, &hwirq, &type);
  612. if (ret)
  613. return ret;
  614. for (i = 0; i < nr_irqs; i++)
  615. gic_irq_domain_map(domain, virq + i, hwirq + i);
  616. return 0;
  617. }
/*
 * irq domain ->free: tear down @nr_irqs mappings starting at @virq by
 * clearing each handler and resetting the associated irq_data.
 */
static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}
/* Domain operations for SGI/PPI/SPI/LPI hwirq <-> virq management. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
};
  633. static int __init gic_of_init(struct device_node *node, struct device_node *parent)
  634. {
  635. void __iomem *dist_base;
  636. struct redist_region *rdist_regs;
  637. u64 redist_stride;
  638. u32 nr_redist_regions;
  639. u32 typer;
  640. u32 reg;
  641. int gic_irqs;
  642. int err;
  643. int i;
  644. dist_base = of_iomap(node, 0);
  645. if (!dist_base) {
  646. pr_err("%s: unable to map gic dist registers\n",
  647. node->full_name);
  648. return -ENXIO;
  649. }
  650. reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
  651. if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
  652. pr_err("%s: no distributor detected, giving up\n",
  653. node->full_name);
  654. err = -ENODEV;
  655. goto out_unmap_dist;
  656. }
  657. if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
  658. nr_redist_regions = 1;
  659. rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
  660. if (!rdist_regs) {
  661. err = -ENOMEM;
  662. goto out_unmap_dist;
  663. }
  664. for (i = 0; i < nr_redist_regions; i++) {
  665. struct resource res;
  666. int ret;
  667. ret = of_address_to_resource(node, 1 + i, &res);
  668. rdist_regs[i].redist_base = of_iomap(node, 1 + i);
  669. if (ret || !rdist_regs[i].redist_base) {
  670. pr_err("%s: couldn't map region %d\n",
  671. node->full_name, i);
  672. err = -ENODEV;
  673. goto out_unmap_rdist;
  674. }
  675. rdist_regs[i].phys_base = res.start;
  676. }
  677. if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
  678. redist_stride = 0;
  679. gic_data.dist_base = dist_base;
  680. gic_data.redist_regions = rdist_regs;
  681. gic_data.nr_redist_regions = nr_redist_regions;
  682. gic_data.redist_stride = redist_stride;
  683. /*
  684. * Find out how many interrupts are supported.
  685. * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
  686. */
  687. typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
  688. gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
  689. gic_irqs = GICD_TYPER_IRQS(typer);
  690. if (gic_irqs > 1020)
  691. gic_irqs = 1020;
  692. gic_data.irq_nr = gic_irqs;
  693. gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
  694. &gic_data);
  695. gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
  696. if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
  697. err = -ENOMEM;
  698. goto out_free;
  699. }
  700. set_handle_irq(gic_handle_irq);
  701. if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
  702. its_init(node, &gic_data.rdists, gic_data.domain);
  703. gic_smp_init();
  704. gic_dist_init();
  705. gic_cpu_init();
  706. gic_cpu_pm_init();
  707. return 0;
  708. out_free:
  709. if (gic_data.domain)
  710. irq_domain_remove(gic_data.domain);
  711. free_percpu(gic_data.rdists.rdist);
  712. out_unmap_rdist:
  713. for (i = 0; i < nr_redist_regions; i++)
  714. if (rdist_regs[i].redist_base)
  715. iounmap(rdist_regs[i].redist_base);
  716. kfree(rdist_regs);
  717. out_unmap_dist:
  718. iounmap(dist_base);
  719. return err;
  720. }
  721. IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);