irq-gic.c

/*
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which delivers interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-acpi.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"
#include "irqchip.h"

union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};

struct gic_chip_data {
	union gic_base dist_base;
	union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return raw_cpu_read(*base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
}
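
/*
 * Worked example of the bitmap addressing above: for hwirq 34,
 * mask = 1 << (34 % 32) = bit 2, and the register word is at
 * offset + (34 / 32) * 4 = offset + 4, i.e. bit 2 of the second
 * 32-bit register in the bank covers hwirq 34.
 */
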
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
}

static void gic_eoi_irq(struct irq_data *d)
{
	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
		break;
	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
		break;
	case IRQCHIP_STATE_MASKED:
		reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
		break;
	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
		break;
	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
		break;
	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
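
/*
 * These two callbacks back the generic irq_get_irqchip_state() and
 * irq_set_irqchip_state() helpers. An illustrative use (not taken
 * from this file) would be a driver retriggering an interrupt by
 * marking it pending in the distributor:
 *
 *	irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
 */
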
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	return gic_configure_irq(gicirq, type, base, NULL);
}
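
/*
 * gic_configure_irq() (in irq-gic-common.c) translates the type into
 * the interrupt's two GIC_DIST_CONFIG bits; the upper bit of the pair
 * selects edge (1) versus level (0) triggering. That is why only
 * IRQ_TYPE_EDGE_RISING and IRQ_TYPE_LEVEL_HIGH are accepted for SPIs
 * above: the GIC cannot express falling-edge or active-low sensing.
 */
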
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
	u32 val, mask, bit;
	unsigned long flags;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);
	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif
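
/*
 * Example of the GIC_DIST_TARGET addressing used above: for hwirq 42,
 * the target byte lives in the word at GIC_DIST_TARGET + (42 & ~3) =
 * GIC_DIST_TARGET + 40, in byte lane (42 % 4) * 8 = 16, so only bits
 * 23:16 of that word are rewritten with the new CPU interface mask.
 */
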
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (likely(irqnr > 15 && irqnr < 1021)) {
			handle_domain_irq(gic->domain, irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}
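
/*
 * The loop above drains the CPU interface: each read of GIC_CPU_INTACK
 * both returns and acknowledges the highest-priority pending interrupt.
 * IDs 16-1020 are forwarded into the IRQ domain, IDs 0-15 are SGIs/IPIs
 * (EOIed here, then handed to handle_IPI()), and any special ID such as
 * the 1023 "spurious" value terminates the loop.
 */
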
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		handle_bad_irq(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};

void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
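
/*
 * A minimal sketch of hooking up a secondary, cascaded GIC from
 * platform code (hypothetical irq_start/parent_irq values, and it
 * assumes the platform defines MAX_GIC_NR > 1):
 *
 *	gic_init_bases(1, 64, dist_base, cpu_base, 0, NULL);
 *	gic_cascade_irq(1, parent_irq);
 */
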
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask && num_possible_cpus() > 1)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}
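
/*
 * Example: the first eight GIC_DIST_TARGET words are banked and
 * read-only, each byte naming the reading CPU's own interface. On
 * CPU interface 1, reading GIC_DIST_TARGET + 0 typically returns
 * 0x02020202, which the folding above reduces to the u8 mask 0x02.
 */
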
static void gic_cpu_if_up(void)
{
	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
	u32 bypass = 0;

	/*
	 * Preserve bypass disable bits to be written back later
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}

static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only. GIC_DIST_TARGET
	 * holds one target byte per interrupt, so each 32-bit write
	 * covers four interrupts and i is already the byte offset of
	 * the word for interrupts i..i+3 (hence i * 4 / 4 == i).
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}

static void gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_GIC_CPU_IF);
	cpu_mask = gic_get_cpumask(gic);
	gic_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		if (i != cpu)
			gic_cpu_map[i] &= ~cpu_mask;

	gic_cpu_config(dist_base, NULL);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up();
}

void gic_cpu_if_down(void)
{
	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
	u32 val = 0;

	val = readl(cpu_base + GIC_CPU_CTRL);
	val &= ~GICC_ENABLE;
	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);
}

#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle. Must be called
 * with interrupts disabled but before powering down the GIC. After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}

static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}

static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up();
}

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif

#ifdef CONFIG_SMP
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long flags, map = 0;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif
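
/*
 * Example: an IPI to logical CPUs 1 and 2 with an identity CPU map
 * builds map = 0x06, so the GIC_DIST_SOFTINT write above becomes
 * (0x06 << 16) | irq: target-list filter, CPU interfaces 1-2,
 * SGI number <irq>.
 */
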
#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}
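
/*
 * Illustrative use (hypothetical caller, not from this file): a CPU
 * can look up its own interface ID before migrating its IRQs away:
 *
 *	int id = gic_get_cpu_id(smp_processor_id());
 *	if (id < 0)
 *		pr_warn("GIC: ambiguous CPU interface mapping\n");
 */
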
/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
 * is also updated. Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;

	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	raw_spin_lock(&irq_controller_lock);

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
		}
	}

	raw_spin_unlock(&irq_controller_lock);

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
						dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}
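
/*
 * Worked example for the rotation trick above: migrating from
 * interface 0 to interface 2 gives ror_val = (0 - 2) & 31 = 30, and
 * ror32(0x01010101, 30) == 0x04040404, so every matching target byte
 * has bit 0 replaced by bit 2 in a single 32-bit operation.
 */
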
/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;
	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	} else {
		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	return 0;
}

static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
}

static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	unsigned long ret = 0;

	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return ret;
}
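
/*
 * Example: a three-cell device tree specifier such as
 *
 *	interrupts = <0 42 4>;
 *
 * means SPI 42 with IRQ_TYPE_LEVEL_HIGH (4); the translation above
 * maps it to hwirq 42 + 16 + 16 = 74, skipping the 16 SGIs and,
 * because the first cell is 0 (an SPI), the 16 PPIs as well.
 */
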
#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
			      void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init(&gic_data[0]);
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
#endif

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct of_phandle_args *irq_data = arg;

	ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
				   irq_data->args_count, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		gic_irq_domain_map(domain, virq + i, hwirq + i);

	return 0;
}

static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.unmap = gic_irq_domain_unmap,
	.xlate = gic_irq_domain_xlate,
};

void gic_set_irqchip_flags(unsigned long flags)
{
	gic_chip.flags |= flags;
}

void __init gic_init_bases(unsigned int gic_nr, int irq_start,
			   void __iomem *dist_base, void __iomem *cpu_base,
			   u32 percpu_offset, struct device_node *node)
{
	irq_hw_number_t hwirq_base;
	struct gic_chip_data *gic;
	int gic_irqs, irq_base, i;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}

		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = percpu_offset * core_id;
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{			/* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		gic_cpu_map[i] = 0xff;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	if (node) {		/* DT case */
		gic->domain = irq_domain_add_linear(node, gic_irqs,
						    &gic_irq_domain_hierarchy_ops,
						    gic);
	} else {		/* Non-DT case */
		/*
		 * For primary GICs, skip over SGIs.
		 * For secondary GICs, skip over PPIs, too.
		 */
		if (gic_nr == 0 && (irq_start & 31) > 0) {
			hwirq_base = 16;
			if (irq_start != -1)
				irq_start = (irq_start & ~31) + 16;
		} else {
			hwirq_base = 32;
		}

		gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */

		irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
					   numa_node_id());
		if (IS_ERR_VALUE(irq_base)) {
			WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
			     irq_start);
			irq_base = irq_start;
		}

		gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
					hwirq_base, &gic_irq_domain_ops, gic);
	}

	if (WARN_ON(!gic->domain))
		return;

	if (gic_nr == 0) {
#ifdef CONFIG_SMP
		set_smp_cross_call(gic_raise_softirq);
		register_cpu_notifier(&gic_cpu_notifier);
#endif
		set_handle_irq(gic_handle_irq);
	}

	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}
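
/*
 * A minimal non-DT bring-up sketch, assuming the platform has already
 * ioremap()ed its distributor and CPU interface windows (hypothetical
 * addresses and irq_start, not taken from this file):
 *
 *	void __iomem *dist = ioremap(0x2c001000, SZ_4K);
 *	void __iomem *cpu = ioremap(0x2c002000, SZ_4K);
 *
 *	gic_init_bases(0, 29, dist, cpu, 0, NULL);
 */
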
#ifdef CONFIG_OF
static int gic_cnt __initdata;

static int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	u32 percpu_offset;
	int irq;

	if (WARN_ON(!node))
		return -ENODEV;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
	if (!gic_cnt)
		gic_init_physaddr(node);

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_of_init(node, gic_data[gic_cnt].domain);

	gic_cnt++;
	return 0;
}

IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
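
/*
 * Example device tree node matched by the declarations above
 * (illustrative register addresses):
 *
 *	intc: interrupt-controller@2c001000 {
 *		compatible = "arm,cortex-a15-gic";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0x2c001000 0x1000>,
 *		      <0x2c002000 0x2000>;
 *	};
 */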
#endif

#ifdef CONFIG_ACPI
static phys_addr_t cpu_phy_base __initdata;
static phys_addr_t dist_phy_base __initdata;

static int __init
gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
			const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;
	phys_addr_t gic_cpu_base;
	static int cpu_base_assigned;

	processor = (struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	/*
	 * There is no support for non-banked GICv1/2 registers in the
	 * ACPI spec. All CPU interface addresses have to be the same.
	 */
	gic_cpu_base = processor->base_address;
	if (cpu_base_assigned && gic_cpu_base != cpu_phy_base)
		return -EINVAL;

	cpu_phy_base = gic_cpu_base;
	cpu_base_assigned = 1;
	return 0;
}

static int __init
gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
				const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;

	dist = (struct acpi_madt_generic_distributor *)header;

	if (BAD_MADT_ENTRY(dist, end))
		return -EINVAL;

	dist_phy_base = dist->base_address;
	return 0;
}

int __init
gic_v2_acpi_init(struct acpi_table_header *table)
{
	void __iomem *cpu_base, *dist_base;
	int count;

	/* Collect CPU base addresses */
	count = acpi_parse_entries(ACPI_SIG_MADT,
				   sizeof(struct acpi_table_madt),
				   gic_acpi_parse_madt_cpu, table,
				   ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
	if (count <= 0) {
		pr_err("No valid GICC entries exist\n");
		return -EINVAL;
	}

	/*
	 * Find the distributor base address. We expect one distributor
	 * entry since the ACPI 5.1 spec supports neither multiple GIC
	 * instances nor GIC cascades.
	 */
	count = acpi_parse_entries(ACPI_SIG_MADT,
				   sizeof(struct acpi_table_madt),
				   gic_acpi_parse_madt_distributor, table,
				   ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
	if (count <= 0) {
		pr_err("No valid GICD entries exist\n");
		return -EINVAL;
	} else if (count > 1) {
		pr_err("More than one GICD entry detected\n");
		return -EINVAL;
	}

	cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
	if (!cpu_base) {
		pr_err("Unable to map GICC registers\n");
		return -ENOMEM;
	}

	dist_base = ioremap(dist_phy_base, ACPI_GICV2_DIST_MEM_SIZE);
	if (!dist_base) {
		pr_err("Unable to map GICD registers\n");
		iounmap(cpu_base);
		return -ENOMEM;
	}

	/*
	 * Initialize GIC instance zero (no multi-GIC support). Also, set GIC
	 * as the default IRQ domain to allow for GSI registration and GSI to
	 * IRQ number translation (see acpi_register_gsi() and
	 * acpi_gsi_to_irq()).
	 */
	gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
	irq_set_default_host(gic_data[0].domain);
	acpi_irq_model = ACPI_IRQ_MODEL_GIC;
	return 0;
}
#endif