irq-gic.c

/*
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which delivers interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-acpi.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"
#include "irqchip.h"

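/*
 * With CONFIG_GIC_NON_BANKED, platforms whose CPU-interface address
 * aliasing is unusable supply one register mapping per CPU through
 * percpu_base; everyone else uses the single common_base mapping.
 */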
union gic_base {
        void __iomem *common_base;
        void __percpu * __iomem *percpu_base;
};

struct gic_chip_data {
        union gic_base dist_base;
        union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
        u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
        u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
        u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
        u32 __percpu *saved_ppi_enable;
        u32 __percpu *saved_ppi_conf;
#endif
        struct irq_domain *domain;
        unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
        void __iomem *(*get_base)(union gic_base *);
#endif
};

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

#ifndef MAX_GIC_NR
#define MAX_GIC_NR      1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
        return raw_cpu_read(*base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
        return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
        return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
        return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
                                         void __iomem *(*f)(union gic_base *))
{
        data->get_base = f;
}
#else
#define gic_data_dist_base(d)   ((d)->dist_base.common_base)
#define gic_data_cpu_base(d)    ((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
        return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
        struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
        return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
        return d->hwirq;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
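/*
 * The distributor's enable, pending and active banks hold one bit per
 * interrupt, 32 interrupts per 32-bit register. gic_poke_irq() sets the
 * bit for d->hwirq in the register bank at 'offset' and gic_peek_irq()
 * reads it back. Because each state has separate SET and CLEAR
 * registers, changing one interrupt is a single store with no
 * read-modify-write of its neighbours.
 */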
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
}

static void gic_mask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}

static void gic_unmask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GIC_DIST_ENABLE_SET);
}

static void gic_eoi_irq(struct irq_data *d)
{
        writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool val)
{
        u32 reg;

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
                break;
        case IRQCHIP_STATE_ACTIVE:
                reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
                break;
        case IRQCHIP_STATE_MASKED:
                reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
                break;
        default:
                return -EINVAL;
        }

        gic_poke_irq(d, reg);
        return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool *val)
{
        switch (which) {
        case IRQCHIP_STATE_PENDING:
                *val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
                break;
        case IRQCHIP_STATE_ACTIVE:
                *val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
                break;
        case IRQCHIP_STATE_MASKED:
                *val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
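
/*
 * GIC hwirq numbering: IDs 0-15 are SGIs (software-generated, per-CPU),
 * 16-31 are PPIs (private peripheral interrupts) and 32-1019 are SPIs
 * (shared peripheral interrupts).
 */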
static int gic_set_type(struct irq_data *d, unsigned int type)
{
        void __iomem *base = gic_dist_base(d);
        unsigned int gicirq = gic_irq(d);

        /* Interrupt configuration for SGIs can't be changed */
        if (gicirq < 16)
                return -EINVAL;

        /* SPIs have restrictions on the supported types */
        if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
            type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        return gic_configure_irq(gicirq, type, base, NULL);
}

#ifdef CONFIG_SMP
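/*
 * GIC_DIST_TARGET holds one byte per interrupt, four per 32-bit
 * register; each byte is a bitmask of CPU interfaces the interrupt may
 * be delivered to. Only the byte belonging to this hwirq is rewritten
 * below, under irq_controller_lock to serialise the read-modify-write.
 */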
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
        unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
        u32 val, mask, bit;
        unsigned long flags;

        if (!force)
                cpu = cpumask_any_and(mask_val, cpu_online_mask);
        else
                cpu = cpumask_first(mask_val);

        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;

        raw_spin_lock_irqsave(&irq_controller_lock, flags);
        mask = 0xff << shift;
        bit = gic_cpu_map[cpu] << shift;
        val = readl_relaxed(reg) & ~mask;
        writel_relaxed(val | bit, reg);
        raw_spin_unlock_irqrestore(&irq_controller_lock, flags);

        return IRQ_SET_MASK_OK;
}
#endif
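
/*
 * Primary interrupt entry point. Reading GIC_CPU_INTACK acknowledges
 * the highest-priority pending interrupt and returns its ID; a spurious
 * ID (1023) terminates the loop. For SGIs (ID < 16) the source CPU is
 * encoded in bits [12:10] of irqstat, which is why the whole irqstat
 * value, not just irqnr, must be written back to GIC_CPU_EOI for them.
 */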
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
        u32 irqstat, irqnr;
        struct gic_chip_data *gic = &gic_data[0];
        void __iomem *cpu_base = gic_data_cpu_base(gic);

        do {
                irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
                irqnr = irqstat & GICC_IAR_INT_ID_MASK;

                if (likely(irqnr > 15 && irqnr < 1021)) {
                        handle_domain_irq(gic->domain, irqnr, regs);
                        continue;
                }
                if (irqnr < 16) {
                        writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
                        handle_IPI(irqnr, regs);
#endif
                        continue;
                }
                break;
        } while (1);
}

static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
        struct gic_chip_data *chip_data = irq_get_handler_data(irq);
        struct irq_chip *chip = irq_get_chip(irq);
        unsigned int cascade_irq, gic_irq;
        unsigned long status;

        chained_irq_enter(chip, desc);

        raw_spin_lock(&irq_controller_lock);
        status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
        raw_spin_unlock(&irq_controller_lock);

        gic_irq = (status & GICC_IAR_INT_ID_MASK);
        if (gic_irq == GICC_INT_SPURIOUS)
                goto out;

        cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
        if (unlikely(gic_irq < 32 || gic_irq > 1020))
                handle_bad_irq(cascade_irq, desc);
        else
                generic_handle_irq(cascade_irq);

 out:
        chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip = {
        .name                   = "GIC",
        .irq_mask               = gic_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoi_irq,
        .irq_set_type           = gic_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity       = gic_set_affinity,
#endif
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
};
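
/*
 * Hook a secondary GIC up behind one interrupt line of its parent: the
 * chained handler installed here reads the child's INTACK and re-injects
 * the decoded interrupt through the child's irq domain (see
 * gic_handle_cascade_irq() above).
 */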
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
        if (gic_nr >= MAX_GIC_NR)
                BUG();
        if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
                BUG();
        irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
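
/*
 * The first 32 interrupts are banked per CPU, so their GIC_DIST_TARGET
 * bytes read back as the mask of the CPU interface performing the read.
 * Scanning until a non-zero byte turns up therefore tells us which CPU
 * interface number the GIC has assigned to this CPU.
 */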
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
        void __iomem *base = gic_data_dist_base(gic);
        u32 mask, i;

        for (i = mask = 0; i < 32; i += 4) {
                mask = readl_relaxed(base + GIC_DIST_TARGET + i);
                mask |= mask >> 16;
                mask |= mask >> 8;
                if (mask)
                        break;
        }

        if (!mask && num_possible_cpus() > 1)
                pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

        return mask;
}

static void gic_cpu_if_up(void)
{
        void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
        u32 bypass = 0;

        /*
         * Preserve bypass disable bits to be written back later
         */
        bypass = readl(cpu_base + GIC_CPU_CTRL);
        bypass &= GICC_DIS_BYPASS_MASK;

        writel_relaxed(bypass | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}

static void __init gic_dist_init(struct gic_chip_data *gic)
{
        unsigned int i;
        u32 cpumask;
        unsigned int gic_irqs = gic->gic_irqs;
        void __iomem *base = gic_data_dist_base(gic);

        writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

        /*
         * Set all global interrupts to this CPU only.
         */
        cpumask = gic_get_cpumask(gic);
        cpumask |= cpumask << 8;
        cpumask |= cpumask << 16;
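        /*
         * One target byte per interrupt, four interrupts per 32-bit
         * register: the byte offset for interrupt i is simply i, which
         * is all the explicit i * 4 / 4 below computes.
         */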
        for (i = 32; i < gic_irqs; i += 4)
                writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

        gic_dist_config(base, gic_irqs, NULL);

        writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}

static void gic_cpu_init(struct gic_chip_data *gic)
{
        void __iomem *dist_base = gic_data_dist_base(gic);
        void __iomem *base = gic_data_cpu_base(gic);
        unsigned int cpu_mask, cpu = smp_processor_id();
        int i;

        /*
         * Get what the GIC says our CPU mask is.
         */
        BUG_ON(cpu >= NR_GIC_CPU_IF);
        cpu_mask = gic_get_cpumask(gic);
        gic_cpu_map[cpu] = cpu_mask;

        /*
         * Clear our mask from the other map entries in case they're
         * still undefined.
         */
        for (i = 0; i < NR_GIC_CPU_IF; i++)
                if (i != cpu)
                        gic_cpu_map[i] &= ~cpu_mask;

        gic_cpu_config(dist_base, NULL);

        writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
        gic_cpu_if_up();
}

void gic_cpu_if_down(void)
{
        void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
        u32 val = 0;

        val = readl(cpu_base + GIC_CPU_CTRL);
        val &= ~GICC_ENABLE;
        writel_relaxed(val, cpu_base + GIC_CPU_CTRL);
}

#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle. Must be called
 * with interrupts disabled but before powering down the GIC. After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
        unsigned int gic_irqs;
        void __iomem *dist_base;
        int i;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        gic_irqs = gic_data[gic_nr].gic_irqs;
        dist_base = gic_data_dist_base(&gic_data[gic_nr]);

        if (!dist_base)
                return;

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
                gic_data[gic_nr].saved_spi_conf[i] =
                        readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
                gic_data[gic_nr].saved_spi_target[i] =
                        readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
                gic_data[gic_nr].saved_spi_enable[i] =
                        readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
        unsigned int gic_irqs;
        unsigned int i;
        void __iomem *dist_base;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        gic_irqs = gic_data[gic_nr].gic_irqs;
        dist_base = gic_data_dist_base(&gic_data[gic_nr]);

        if (!dist_base)
                return;

        writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
                writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
                        dist_base + GIC_DIST_CONFIG + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
                writel_relaxed(GICD_INT_DEF_PRI_X4,
                        dist_base + GIC_DIST_PRI + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
                writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
                        dist_base + GIC_DIST_TARGET + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
                writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
                        dist_base + GIC_DIST_ENABLE_SET + i * 4);

        writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}

static void gic_cpu_save(unsigned int gic_nr)
{
        int i;
        u32 *ptr;
        void __iomem *dist_base;
        void __iomem *cpu_base;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        dist_base = gic_data_dist_base(&gic_data[gic_nr]);
        cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

        if (!dist_base || !cpu_base)
                return;

        ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
        for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
                ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

        ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
        for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
                ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}

static void gic_cpu_restore(unsigned int gic_nr)
{
        int i;
        u32 *ptr;
        void __iomem *dist_base;
        void __iomem *cpu_base;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        dist_base = gic_data_dist_base(&gic_data[gic_nr]);
        cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

        if (!dist_base || !cpu_base)
                return;

        ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
        for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
                writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

        ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
        for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
                writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

        for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
                writel_relaxed(GICD_INT_DEF_PRI_X4,
                        dist_base + GIC_DIST_PRI + i * 4);

        writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
        gic_cpu_if_up();
}

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
        int i;

        for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
                /* Skip over unused GICs */
                if (!gic_data[i].get_base)
                        continue;
#endif
                switch (cmd) {
                case CPU_PM_ENTER:
                        gic_cpu_save(i);
                        break;
                case CPU_PM_ENTER_FAILED:
                case CPU_PM_EXIT:
                        gic_cpu_restore(i);
                        break;
                case CPU_CLUSTER_PM_ENTER:
                        gic_dist_save(i);
                        break;
                case CPU_CLUSTER_PM_ENTER_FAILED:
                case CPU_CLUSTER_PM_EXIT:
                        gic_dist_restore(i);
                        break;
                }
        }

        return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
        .notifier_call = gic_notifier,
};

static void __init gic_pm_init(struct gic_chip_data *gic)
{
        gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
                sizeof(u32));
        BUG_ON(!gic->saved_ppi_enable);

        gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
                sizeof(u32));
        BUG_ON(!gic->saved_ppi_conf);

        if (gic == &gic_data[0])
                cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif

#ifdef CONFIG_SMP
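/*
 * Writing GIC_DIST_SOFTINT raises an SGI: bits [23:16] hold the target
 * CPU interface list and bits [3:0] the SGI number (0-15).
 */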
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
        int cpu;
        unsigned long flags, map = 0;

        raw_spin_lock_irqsave(&irq_controller_lock, flags);

        /* Convert our logical CPU mask into a physical one. */
        for_each_cpu(cpu, mask)
                map |= gic_cpu_map[cpu];

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before they observe us issuing the IPI.
         */
        dmb(ishst);

        /* this always happens on GIC0 */
        writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

        raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif

#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
        BUG_ON(cpu_id >= NR_GIC_CPU_IF);
        cpu_id = 1 << cpu_id;
        /* this always happens on GIC0 */
        writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
        unsigned int cpu_bit;

        if (cpu >= NR_GIC_CPU_IF)
                return -1;
        cpu_bit = gic_cpu_map[cpu];
        if (cpu_bit & (cpu_bit - 1))
                return -1;
        return __ffs(cpu_bit);
}

/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
 * is also updated. Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
        unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
        void __iomem *dist_base;
        int i, ror_val, cpu = smp_processor_id();
        u32 val, cur_target_mask, active_mask;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        dist_base = gic_data_dist_base(&gic_data[gic_nr]);
        if (!dist_base)
                return;
        gic_irqs = gic_data[gic_nr].gic_irqs;

        cur_cpu_id = __ffs(gic_cpu_map[cpu]);
        cur_target_mask = 0x01010101 << cur_cpu_id;
        ror_val = (cur_cpu_id - new_cpu_id) & 31;

        raw_spin_lock(&irq_controller_lock);

        /* Update the target interface for this logical CPU */
        gic_cpu_map[cpu] = 1 << new_cpu_id;

        /*
         * Find all the peripheral interrupts targeting the current
         * CPU interface and migrate them to the new CPU interface.
         * We skip DIST_TARGET 0 to 7 as they are read-only.
         */
        for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
                val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
                active_mask = val & cur_target_mask;
                if (active_mask) {
                        val &= ~active_mask;
                        val |= ror32(active_mask, ror_val);
                        writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
                }
        }

        raw_spin_unlock(&irq_controller_lock);

        /*
         * Now let's migrate and clear any potential SGIs that might be
         * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
         * is a banked register, we can only forward the SGI using
         * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
         * doesn't use that information anyway.
         *
         * For the same reason we do not adjust SGI source information
         * for previously sent SGIs by us to other CPUs either.
         */
        for (i = 0; i < 16; i += 4) {
                int j;
                val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
                if (!val)
                        continue;
                writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
                for (j = i; j < i + 4; j++) {
                        if (val & 0xff)
                                writel_relaxed((1 << (new_cpu_id + 16)) | j,
                                                dist_base + GIC_DIST_SOFTINT);
                        val >>= 8;
                }
        }
}

/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
        if (!gic_dist_physaddr)
                return 0;
        return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

void __init gic_init_physaddr(struct device_node *node)
{
        struct resource res;
        if (of_address_to_resource(node, 0, &res) == 0) {
                gic_dist_physaddr = res.start;
                pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
        }
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif

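/*
 * Per-CPU interrupts (hwirq < 32) get the percpu-devid flow and stay
 * disabled until each CPU enables them itself; shared interrupts use
 * the fasteoi flow, where gic_eoi_irq() signals completion once the
 * handler has run.
 */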
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
{
        if (hw < 32) {
                irq_set_percpu_devid(irq);
                irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
                                    handle_percpu_devid_irq, NULL, NULL);
                set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
        } else {
                irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
                set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        }
        return 0;
}

static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
}
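
/*
 * Device-tree interrupt specifiers for the GIC use three cells: cell 0
 * selects the interrupt space (0 = SPI, 1 = PPI), cell 1 is the number
 * within that space, and cell 2 carries the trigger-type flags. The
 * translation below turns that into a linear hwirq: PPIs start at 16,
 * SPIs at 32.
 */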
static int gic_irq_domain_xlate(struct irq_domain *d,
                                struct device_node *controller,
                                const u32 *intspec, unsigned int intsize,
                                unsigned long *out_hwirq, unsigned int *out_type)
{
        unsigned long ret = 0;

        if (d->of_node != controller)
                return -EINVAL;
        if (intsize < 3)
                return -EINVAL;

        /* Get the interrupt number and add 16 to skip over SGIs */
        *out_hwirq = intspec[1] + 16;

        /* For SPIs, we need to add 16 more to get the GIC irq ID number */
        if (!intspec[0])
                *out_hwirq += 16;

        *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

        return ret;
}

#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
                              void *hcpu)
{
        if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                gic_cpu_init(&gic_data[0]);
        return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
        .notifier_call = gic_secondary_init,
        .priority = 100,
};
#endif

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
{
        int i, ret;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        struct of_phandle_args *irq_data = arg;

        ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
                                   irq_data->args_count, &hwirq, &type);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++)
                gic_irq_domain_map(domain, virq + i, hwirq + i);

        return 0;
}

static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
        .xlate = gic_irq_domain_xlate,
        .alloc = gic_irq_domain_alloc,
        .free = irq_domain_free_irqs_top,
};

static const struct irq_domain_ops gic_irq_domain_ops = {
        .map = gic_irq_domain_map,
        .unmap = gic_irq_domain_unmap,
        .xlate = gic_irq_domain_xlate,
};

void gic_set_irqchip_flags(unsigned long flags)
{
        gic_chip.flags |= flags;
}

void __init gic_init_bases(unsigned int gic_nr, int irq_start,
                           void __iomem *dist_base, void __iomem *cpu_base,
                           u32 percpu_offset, struct device_node *node)
{
        irq_hw_number_t hwirq_base;
        struct gic_chip_data *gic;
        int gic_irqs, irq_base, i;

        BUG_ON(gic_nr >= MAX_GIC_NR);

        gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
        if (percpu_offset) { /* Franken-GIC without banked registers... */
                unsigned int cpu;

                gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
                gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
                if (WARN_ON(!gic->dist_base.percpu_base ||
                            !gic->cpu_base.percpu_base)) {
                        free_percpu(gic->dist_base.percpu_base);
                        free_percpu(gic->cpu_base.percpu_base);
                        return;
                }

                for_each_possible_cpu(cpu) {
                        u32 mpidr = cpu_logical_map(cpu);
                        u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        unsigned long offset = percpu_offset * core_id;
                        *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
                        *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
                }

                gic_set_base_accessor(gic, gic_get_percpu_base);
        } else
#endif
        { /* Normal, sane GIC... */
                WARN(percpu_offset,
                     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
                     percpu_offset);
                gic->dist_base.common_base = dist_base;
                gic->cpu_base.common_base = cpu_base;
                gic_set_base_accessor(gic, gic_get_common_base);
        }

        /*
         * Initialize the CPU interface map to all CPUs.
         * It will be refined as each CPU probes its ID.
         */
        for (i = 0; i < NR_GIC_CPU_IF; i++)
                gic_cpu_map[i] = 0xff;

        /*
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources.
         */
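        /* GICD_TYPER.ITLinesNumber encodes 32 * (N + 1) interrupt lines. */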
        gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
        gic_irqs = (gic_irqs + 1) * 32;
        if (gic_irqs > 1020)
                gic_irqs = 1020;
        gic->gic_irqs = gic_irqs;

        if (node) { /* DT case */
                gic->domain = irq_domain_add_linear(node, gic_irqs,
                                                    &gic_irq_domain_hierarchy_ops,
                                                    gic);
        } else { /* Non-DT case */
                /*
                 * For primary GICs, skip over SGIs.
                 * For secondary GICs, skip over PPIs, too.
                 */
                if (gic_nr == 0 && (irq_start & 31) > 0) {
                        hwirq_base = 16;
                        if (irq_start != -1)
                                irq_start = (irq_start & ~31) + 16;
                } else {
                        hwirq_base = 32;
                }

                gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */

                irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
                                           numa_node_id());
                if (IS_ERR_VALUE(irq_base)) {
                        WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
                             irq_start);
                        irq_base = irq_start;
                }

                gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
                                        hwirq_base, &gic_irq_domain_ops, gic);
        }

        if (WARN_ON(!gic->domain))
                return;

        if (gic_nr == 0) {
#ifdef CONFIG_SMP
                set_smp_cross_call(gic_raise_softirq);
                register_cpu_notifier(&gic_cpu_notifier);
#endif
                set_handle_irq(gic_handle_irq);
        }

        gic_dist_init(gic);
        gic_cpu_init(gic);
        gic_pm_init(gic);
}

#ifdef CONFIG_OF
static int gic_cnt __initdata;

static int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
        void __iomem *cpu_base;
        void __iomem *dist_base;
        u32 percpu_offset;
        int irq;

        if (WARN_ON(!node))
                return -ENODEV;

        dist_base = of_iomap(node, 0);
        WARN(!dist_base, "unable to map gic dist registers\n");

        cpu_base = of_iomap(node, 1);
        WARN(!cpu_base, "unable to map gic cpu registers\n");

        if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
                percpu_offset = 0;

        gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
        if (!gic_cnt)
                gic_init_physaddr(node);

        if (parent) {
                irq = irq_of_parse_and_map(node, 0);
                gic_cascade_irq(gic_cnt, irq);
        }

        if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
                gicv2m_of_init(node, gic_data[gic_cnt].domain);

        gic_cnt++;
        return 0;
}
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
#endif

#ifdef CONFIG_ACPI
static phys_addr_t dist_phy_base, cpu_phy_base __initdata;

static int __init
gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
                        const unsigned long end)
{
        struct acpi_madt_generic_interrupt *processor;
        phys_addr_t gic_cpu_base;
        static int cpu_base_assigned;

        processor = (struct acpi_madt_generic_interrupt *)header;

        if (BAD_MADT_ENTRY(processor, end))
                return -EINVAL;

        /*
         * There is no support for non-banked GICv1/2 registers in the
         * ACPI spec. All CPU interface addresses have to be the same.
         */
        gic_cpu_base = processor->base_address;
        if (cpu_base_assigned && gic_cpu_base != cpu_phy_base)
                return -EINVAL;

        cpu_phy_base = gic_cpu_base;
        cpu_base_assigned = 1;
        return 0;
}

static int __init
gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
                                const unsigned long end)
{
        struct acpi_madt_generic_distributor *dist;

        dist = (struct acpi_madt_generic_distributor *)header;

        if (BAD_MADT_ENTRY(dist, end))
                return -EINVAL;

        dist_phy_base = dist->base_address;
        return 0;
}

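/*
 * The MADT is walked twice below: once over GICC entries to collect the
 * (single, shared) CPU interface address, and once over GICD entries to
 * find the one distributor that the GICv2 model allows.
 */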
int __init
gic_v2_acpi_init(struct acpi_table_header *table)
{
        void __iomem *cpu_base, *dist_base;
        int count;

        /* Collect CPU base addresses */
        count = acpi_parse_entries(ACPI_SIG_MADT,
                                   sizeof(struct acpi_table_madt),
                                   gic_acpi_parse_madt_cpu, table,
                                   ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
        if (count <= 0) {
                pr_err("No valid GICC entries exist\n");
                return -EINVAL;
        }

        /*
         * Find the distributor base address. We expect one distributor
         * entry since the ACPI 5.1 spec supports neither multiple GIC
         * instances nor GIC cascades.
         */
        count = acpi_parse_entries(ACPI_SIG_MADT,
                                   sizeof(struct acpi_table_madt),
                                   gic_acpi_parse_madt_distributor, table,
                                   ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
        if (count <= 0) {
                pr_err("No valid GICD entries exist\n");
                return -EINVAL;
        } else if (count > 1) {
                pr_err("More than one GICD entry detected\n");
                return -EINVAL;
        }

        cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
        if (!cpu_base) {
                pr_err("Unable to map GICC registers\n");
                return -ENOMEM;
        }

        dist_base = ioremap(dist_phy_base, ACPI_GICV2_DIST_MEM_SIZE);
        if (!dist_base) {
                pr_err("Unable to map GICD registers\n");
                iounmap(cpu_base);
                return -ENOMEM;
        }

        /*
         * Initialize GIC instance zero (no multi-GIC support). Also, set
         * GIC as the default IRQ domain to allow for GSI registration and
         * GSI-to-IRQ number translation (see acpi_register_gsi() and
         * acpi_gsi_to_irq()).
         */
        gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
        irq_set_default_host(gic_data[0].domain);
        acpi_irq_model = ACPI_IRQ_MODEL_GIC;
        return 0;
}
#endif
  951. #endif