/* irq-mips-gic.c — MIPS Global Interrupt Controller (GIC) irqchip driver */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
  7. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
  8. */
  9. #include <linux/bitmap.h>
  10. #include <linux/clocksource.h>
  11. #include <linux/init.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/irq.h>
  14. #include <linux/irqchip.h>
  15. #include <linux/irqchip/mips-gic.h>
  16. #include <linux/of_address.h>
  17. #include <linux/sched.h>
  18. #include <linux/smp.h>
  19. #include <asm/mips-cm.h>
  20. #include <asm/setup.h>
  21. #include <asm/traps.h>
  22. #include <dt-bindings/interrupt-controller/mips-gic.h>
  23. unsigned int gic_present;
  24. struct gic_pcpu_mask {
  25. DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
  26. };
  27. static unsigned long __gic_base_addr;
  28. static void __iomem *gic_base;
  29. static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
  30. static DEFINE_SPINLOCK(gic_lock);
  31. static struct irq_domain *gic_irq_domain;
  32. static struct irq_domain *gic_ipi_domain;
  33. static int gic_shared_intrs;
  34. static int gic_vpes;
  35. static unsigned int gic_cpu_pin;
  36. static unsigned int timer_cpu_pin;
  37. static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
  38. DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
  39. DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
  40. static void __gic_irq_dispatch(void);
  41. static inline u32 gic_read32(unsigned int reg)
  42. {
  43. return __raw_readl(gic_base + reg);
  44. }
  45. static inline u64 gic_read64(unsigned int reg)
  46. {
  47. return __raw_readq(gic_base + reg);
  48. }
  49. static inline unsigned long gic_read(unsigned int reg)
  50. {
  51. if (!mips_cm_is64)
  52. return gic_read32(reg);
  53. else
  54. return gic_read64(reg);
  55. }
  56. static inline void gic_write32(unsigned int reg, u32 val)
  57. {
  58. return __raw_writel(val, gic_base + reg);
  59. }
  60. static inline void gic_write64(unsigned int reg, u64 val)
  61. {
  62. return __raw_writeq(val, gic_base + reg);
  63. }
  64. static inline void gic_write(unsigned int reg, unsigned long val)
  65. {
  66. if (!mips_cm_is64)
  67. return gic_write32(reg, (u32)val);
  68. else
  69. return gic_write64(reg, (u64)val);
  70. }
  71. static inline void gic_update_bits(unsigned int reg, unsigned long mask,
  72. unsigned long val)
  73. {
  74. unsigned long regval;
  75. regval = gic_read(reg);
  76. regval &= ~mask;
  77. regval |= val;
  78. gic_write(reg, regval);
  79. }
  80. static inline void gic_reset_mask(unsigned int intr)
  81. {
  82. gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
  83. 1ul << GIC_INTR_BIT(intr));
  84. }
  85. static inline void gic_set_mask(unsigned int intr)
  86. {
  87. gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
  88. 1ul << GIC_INTR_BIT(intr));
  89. }
  90. static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
  91. {
  92. gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
  93. GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
  94. (unsigned long)pol << GIC_INTR_BIT(intr));
  95. }
  96. static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
  97. {
  98. gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
  99. GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
  100. (unsigned long)trig << GIC_INTR_BIT(intr));
  101. }
  102. static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
  103. {
  104. gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
  105. 1ul << GIC_INTR_BIT(intr),
  106. (unsigned long)dual << GIC_INTR_BIT(intr));
  107. }
  108. static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
  109. {
  110. gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
  111. GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
  112. }
  113. static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
  114. {
  115. gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
  116. GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
  117. GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
  118. }
  119. #ifdef CONFIG_CLKSRC_MIPS_GIC
  120. u64 gic_read_count(void)
  121. {
  122. unsigned int hi, hi2, lo;
  123. if (mips_cm_is64)
  124. return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));
  125. do {
  126. hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
  127. lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
  128. hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
  129. } while (hi2 != hi);
  130. return (((u64) hi) << 32) + lo;
  131. }
  132. unsigned int gic_get_count_width(void)
  133. {
  134. unsigned int bits, config;
  135. config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
  136. bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
  137. GIC_SH_CONFIG_COUNTBITS_SHF);
  138. return bits;
  139. }
  140. void gic_write_compare(u64 cnt)
  141. {
  142. if (mips_cm_is64) {
  143. gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
  144. } else {
  145. gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
  146. (int)(cnt >> 32));
  147. gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
  148. (int)(cnt & 0xffffffff));
  149. }
  150. }
  151. void gic_write_cpu_compare(u64 cnt, int cpu)
  152. {
  153. unsigned long flags;
  154. local_irq_save(flags);
  155. gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu));
  156. if (mips_cm_is64) {
  157. gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
  158. } else {
  159. gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
  160. (int)(cnt >> 32));
  161. gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
  162. (int)(cnt & 0xffffffff));
  163. }
  164. local_irq_restore(flags);
  165. }
  166. u64 gic_read_compare(void)
  167. {
  168. unsigned int hi, lo;
  169. if (mips_cm_is64)
  170. return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));
  171. hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
  172. lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
  173. return (((u64) hi) << 32) + lo;
  174. }
  175. void gic_start_count(void)
  176. {
  177. u32 gicconfig;
  178. /* Start the counter */
  179. gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
  180. gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
  181. gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
  182. }
  183. void gic_stop_count(void)
  184. {
  185. u32 gicconfig;
  186. /* Stop the counter */
  187. gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
  188. gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
  189. gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
  190. }
  191. #endif
  192. unsigned gic_read_local_vp_id(void)
  193. {
  194. unsigned long ident;
  195. ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT));
  196. return ident & GIC_VP_IDENT_VCNUM_MSK;
  197. }
  198. static bool gic_local_irq_is_routable(int intr)
  199. {
  200. u32 vpe_ctl;
  201. /* All local interrupts are routable in EIC mode. */
  202. if (cpu_has_veic)
  203. return true;
  204. vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
  205. switch (intr) {
  206. case GIC_LOCAL_INT_TIMER:
  207. return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
  208. case GIC_LOCAL_INT_PERFCTR:
  209. return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
  210. case GIC_LOCAL_INT_FDC:
  211. return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
  212. case GIC_LOCAL_INT_SWINT0:
  213. case GIC_LOCAL_INT_SWINT1:
  214. return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
  215. default:
  216. return true;
  217. }
  218. }
  219. static void gic_bind_eic_interrupt(int irq, int set)
  220. {
  221. /* Convert irq vector # to hw int # */
  222. irq -= GIC_PIN_TO_VEC_OFFSET;
  223. /* Set irq to use shadow set */
  224. gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
  225. GIC_VPE_EIC_SS(irq), set);
  226. }
  227. static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
  228. {
  229. irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));
  230. gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
  231. }
  232. int gic_get_c0_compare_int(void)
  233. {
  234. if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
  235. return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
  236. return irq_create_mapping(gic_irq_domain,
  237. GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
  238. }
  239. int gic_get_c0_perfcount_int(void)
  240. {
  241. if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
  242. /* Is the performance counter shared with the timer? */
  243. if (cp0_perfcount_irq < 0)
  244. return -1;
  245. return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
  246. }
  247. return irq_create_mapping(gic_irq_domain,
  248. GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
  249. }
  250. int gic_get_c0_fdc_int(void)
  251. {
  252. if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
  253. /* Is the FDC IRQ even present? */
  254. if (cp0_fdc_irq < 0)
  255. return -1;
  256. return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
  257. }
  258. return irq_create_mapping(gic_irq_domain,
  259. GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
  260. }
  261. int gic_get_usm_range(struct resource *gic_usm_res)
  262. {
  263. if (!gic_present)
  264. return -1;
  265. gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
  266. gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);
  267. return 0;
  268. }
  269. static void gic_handle_shared_int(bool chained)
  270. {
  271. unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
  272. unsigned long *pcpu_mask;
  273. unsigned long pending_reg, intrmask_reg;
  274. DECLARE_BITMAP(pending, GIC_MAX_INTRS);
  275. DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);
  276. /* Get per-cpu bitmaps */
  277. pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
  278. pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
  279. intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);
  280. for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
  281. pending[i] = gic_read(pending_reg);
  282. intrmask[i] = gic_read(intrmask_reg);
  283. pending_reg += gic_reg_step;
  284. intrmask_reg += gic_reg_step;
  285. if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
  286. continue;
  287. pending[i] |= (u64)gic_read(pending_reg) << 32;
  288. intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
  289. pending_reg += gic_reg_step;
  290. intrmask_reg += gic_reg_step;
  291. }
  292. bitmap_and(pending, pending, intrmask, gic_shared_intrs);
  293. bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);
  294. for_each_set_bit(intr, pending, gic_shared_intrs) {
  295. virq = irq_linear_revmap(gic_irq_domain,
  296. GIC_SHARED_TO_HWIRQ(intr));
  297. if (chained)
  298. generic_handle_irq(virq);
  299. else
  300. do_IRQ(virq);
  301. }
  302. }
  303. static void gic_mask_irq(struct irq_data *d)
  304. {
  305. gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
  306. }
  307. static void gic_unmask_irq(struct irq_data *d)
  308. {
  309. gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
  310. }
  311. static void gic_ack_irq(struct irq_data *d)
  312. {
  313. unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
  314. gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
  315. }
  316. static int gic_set_type(struct irq_data *d, unsigned int type)
  317. {
  318. unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
  319. unsigned long flags;
  320. bool is_edge;
  321. spin_lock_irqsave(&gic_lock, flags);
  322. switch (type & IRQ_TYPE_SENSE_MASK) {
  323. case IRQ_TYPE_EDGE_FALLING:
  324. gic_set_polarity(irq, GIC_POL_NEG);
  325. gic_set_trigger(irq, GIC_TRIG_EDGE);
  326. gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
  327. is_edge = true;
  328. break;
  329. case IRQ_TYPE_EDGE_RISING:
  330. gic_set_polarity(irq, GIC_POL_POS);
  331. gic_set_trigger(irq, GIC_TRIG_EDGE);
  332. gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
  333. is_edge = true;
  334. break;
  335. case IRQ_TYPE_EDGE_BOTH:
  336. /* polarity is irrelevant in this case */
  337. gic_set_trigger(irq, GIC_TRIG_EDGE);
  338. gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
  339. is_edge = true;
  340. break;
  341. case IRQ_TYPE_LEVEL_LOW:
  342. gic_set_polarity(irq, GIC_POL_NEG);
  343. gic_set_trigger(irq, GIC_TRIG_LEVEL);
  344. gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
  345. is_edge = false;
  346. break;
  347. case IRQ_TYPE_LEVEL_HIGH:
  348. default:
  349. gic_set_polarity(irq, GIC_POL_POS);
  350. gic_set_trigger(irq, GIC_TRIG_LEVEL);
  351. gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
  352. is_edge = false;
  353. break;
  354. }
  355. if (is_edge)
  356. irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
  357. handle_edge_irq, NULL);
  358. else
  359. irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
  360. handle_level_irq, NULL);
  361. spin_unlock_irqrestore(&gic_lock, flags);
  362. return 0;
  363. }
  364. #ifdef CONFIG_SMP
  365. static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
  366. bool force)
  367. {
  368. unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
  369. cpumask_t tmp = CPU_MASK_NONE;
  370. unsigned long flags;
  371. int i;
  372. cpumask_and(&tmp, cpumask, cpu_online_mask);
  373. if (cpumask_empty(&tmp))
  374. return -EINVAL;
  375. /* Assumption : cpumask refers to a single CPU */
  376. spin_lock_irqsave(&gic_lock, flags);
  377. /* Re-route this IRQ */
  378. gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
  379. /* Update the pcpu_masks */
  380. for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
  381. clear_bit(irq, pcpu_masks[i].pcpu_mask);
  382. set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
  383. cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
  384. spin_unlock_irqrestore(&gic_lock, flags);
  385. return IRQ_SET_MASK_OK_NOCOPY;
  386. }
  387. #endif
  388. static struct irq_chip gic_level_irq_controller = {
  389. .name = "MIPS GIC",
  390. .irq_mask = gic_mask_irq,
  391. .irq_unmask = gic_unmask_irq,
  392. .irq_set_type = gic_set_type,
  393. #ifdef CONFIG_SMP
  394. .irq_set_affinity = gic_set_affinity,
  395. #endif
  396. };
  397. static struct irq_chip gic_edge_irq_controller = {
  398. .name = "MIPS GIC",
  399. .irq_ack = gic_ack_irq,
  400. .irq_mask = gic_mask_irq,
  401. .irq_unmask = gic_unmask_irq,
  402. .irq_set_type = gic_set_type,
  403. #ifdef CONFIG_SMP
  404. .irq_set_affinity = gic_set_affinity,
  405. #endif
  406. .ipi_send_single = gic_send_ipi,
  407. };
  408. static void gic_handle_local_int(bool chained)
  409. {
  410. unsigned long pending, masked;
  411. unsigned int intr, virq;
  412. pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
  413. masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
  414. bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
  415. for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
  416. virq = irq_linear_revmap(gic_irq_domain,
  417. GIC_LOCAL_TO_HWIRQ(intr));
  418. if (chained)
  419. generic_handle_irq(virq);
  420. else
  421. do_IRQ(virq);
  422. }
  423. }
  424. static void gic_mask_local_irq(struct irq_data *d)
  425. {
  426. int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
  427. gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
  428. }
  429. static void gic_unmask_local_irq(struct irq_data *d)
  430. {
  431. int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
  432. gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
  433. }
  434. static struct irq_chip gic_local_irq_controller = {
  435. .name = "MIPS GIC Local",
  436. .irq_mask = gic_mask_local_irq,
  437. .irq_unmask = gic_unmask_local_irq,
  438. };
  439. static void gic_mask_local_irq_all_vpes(struct irq_data *d)
  440. {
  441. int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
  442. int i;
  443. unsigned long flags;
  444. spin_lock_irqsave(&gic_lock, flags);
  445. for (i = 0; i < gic_vpes; i++) {
  446. gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
  447. mips_cm_vp_id(i));
  448. gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
  449. }
  450. spin_unlock_irqrestore(&gic_lock, flags);
  451. }
  452. static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
  453. {
  454. int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
  455. int i;
  456. unsigned long flags;
  457. spin_lock_irqsave(&gic_lock, flags);
  458. for (i = 0; i < gic_vpes; i++) {
  459. gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
  460. mips_cm_vp_id(i));
  461. gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
  462. }
  463. spin_unlock_irqrestore(&gic_lock, flags);
  464. }
  465. static struct irq_chip gic_all_vpes_local_irq_controller = {
  466. .name = "MIPS GIC Local",
  467. .irq_mask = gic_mask_local_irq_all_vpes,
  468. .irq_unmask = gic_unmask_local_irq_all_vpes,
  469. };
  470. static void __gic_irq_dispatch(void)
  471. {
  472. gic_handle_local_int(false);
  473. gic_handle_shared_int(false);
  474. }
  475. static void gic_irq_dispatch(struct irq_desc *desc)
  476. {
  477. gic_handle_local_int(true);
  478. gic_handle_shared_int(true);
  479. }
  480. static void __init gic_basic_init(void)
  481. {
  482. unsigned int i;
  483. board_bind_eic_interrupt = &gic_bind_eic_interrupt;
  484. /* Setup defaults */
  485. for (i = 0; i < gic_shared_intrs; i++) {
  486. gic_set_polarity(i, GIC_POL_POS);
  487. gic_set_trigger(i, GIC_TRIG_LEVEL);
  488. gic_reset_mask(i);
  489. }
  490. for (i = 0; i < gic_vpes; i++) {
  491. unsigned int j;
  492. gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
  493. mips_cm_vp_id(i));
  494. for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
  495. if (!gic_local_irq_is_routable(j))
  496. continue;
  497. gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
  498. }
  499. }
  500. }
  501. static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
  502. irq_hw_number_t hw)
  503. {
  504. int intr = GIC_HWIRQ_TO_LOCAL(hw);
  505. int ret = 0;
  506. int i;
  507. unsigned long flags;
  508. if (!gic_local_irq_is_routable(intr))
  509. return -EPERM;
  510. spin_lock_irqsave(&gic_lock, flags);
  511. for (i = 0; i < gic_vpes; i++) {
  512. u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
  513. gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
  514. mips_cm_vp_id(i));
  515. switch (intr) {
  516. case GIC_LOCAL_INT_WD:
  517. gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
  518. break;
  519. case GIC_LOCAL_INT_COMPARE:
  520. gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
  521. val);
  522. break;
  523. case GIC_LOCAL_INT_TIMER:
  524. /* CONFIG_MIPS_CMP workaround (see __gic_init) */
  525. val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
  526. gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
  527. val);
  528. break;
  529. case GIC_LOCAL_INT_PERFCTR:
  530. gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
  531. val);
  532. break;
  533. case GIC_LOCAL_INT_SWINT0:
  534. gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
  535. val);
  536. break;
  537. case GIC_LOCAL_INT_SWINT1:
  538. gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
  539. val);
  540. break;
  541. case GIC_LOCAL_INT_FDC:
  542. gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
  543. break;
  544. default:
  545. pr_err("Invalid local IRQ %d\n", intr);
  546. ret = -EINVAL;
  547. break;
  548. }
  549. }
  550. spin_unlock_irqrestore(&gic_lock, flags);
  551. return ret;
  552. }
  553. static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
  554. irq_hw_number_t hw, unsigned int vpe)
  555. {
  556. int intr = GIC_HWIRQ_TO_SHARED(hw);
  557. unsigned long flags;
  558. int i;
  559. spin_lock_irqsave(&gic_lock, flags);
  560. gic_map_to_pin(intr, gic_cpu_pin);
  561. gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
  562. for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
  563. clear_bit(intr, pcpu_masks[i].pcpu_mask);
  564. set_bit(intr, pcpu_masks[vpe].pcpu_mask);
  565. spin_unlock_irqrestore(&gic_lock, flags);
  566. return 0;
  567. }
  568. static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
  569. const u32 *intspec, unsigned int intsize,
  570. irq_hw_number_t *out_hwirq,
  571. unsigned int *out_type)
  572. {
  573. if (intsize != 3)
  574. return -EINVAL;
  575. if (intspec[0] == GIC_SHARED)
  576. *out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
  577. else if (intspec[0] == GIC_LOCAL)
  578. *out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
  579. else
  580. return -EINVAL;
  581. *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
  582. return 0;
  583. }
  584. static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
  585. irq_hw_number_t hwirq)
  586. {
  587. int err;
  588. if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
  589. /* verify that shared irqs don't conflict with an IPI irq */
  590. if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
  591. return -EBUSY;
  592. err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
  593. &gic_level_irq_controller,
  594. NULL);
  595. if (err)
  596. return err;
  597. return gic_shared_irq_domain_map(d, virq, hwirq, 0);
  598. }
  599. switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
  600. case GIC_LOCAL_INT_TIMER:
  601. case GIC_LOCAL_INT_PERFCTR:
  602. case GIC_LOCAL_INT_FDC:
  603. /*
  604. * HACK: These are all really percpu interrupts, but
  605. * the rest of the MIPS kernel code does not use the
  606. * percpu IRQ API for them.
  607. */
  608. err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
  609. &gic_all_vpes_local_irq_controller,
  610. NULL);
  611. if (err)
  612. return err;
  613. irq_set_handler(virq, handle_percpu_irq);
  614. break;
  615. default:
  616. err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
  617. &gic_local_irq_controller,
  618. NULL);
  619. if (err)
  620. return err;
  621. irq_set_handler(virq, handle_percpu_devid_irq);
  622. irq_set_percpu_devid(virq);
  623. break;
  624. }
  625. return gic_local_irq_domain_map(d, virq, hwirq);
  626. }
  627. static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
  628. unsigned int nr_irqs, void *arg)
  629. {
  630. struct irq_fwspec *fwspec = arg;
  631. irq_hw_number_t hwirq;
  632. if (fwspec->param[0] == GIC_SHARED)
  633. hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
  634. else
  635. hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);
  636. return gic_irq_domain_map(d, virq, hwirq);
  637. }
  638. void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
  639. unsigned int nr_irqs)
  640. {
  641. }
  642. static const struct irq_domain_ops gic_irq_domain_ops = {
  643. .xlate = gic_irq_domain_xlate,
  644. .alloc = gic_irq_domain_alloc,
  645. .free = gic_irq_domain_free,
  646. .map = gic_irq_domain_map,
  647. };
  648. static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
  649. const u32 *intspec, unsigned int intsize,
  650. irq_hw_number_t *out_hwirq,
  651. unsigned int *out_type)
  652. {
  653. /*
  654. * There's nothing to translate here. hwirq is dynamically allocated and
  655. * the irq type is always edge triggered.
  656. * */
  657. *out_hwirq = 0;
  658. *out_type = IRQ_TYPE_EDGE_RISING;
  659. return 0;
  660. }
  661. static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
  662. unsigned int nr_irqs, void *arg)
  663. {
  664. struct cpumask *ipimask = arg;
  665. irq_hw_number_t hwirq, base_hwirq;
  666. int cpu, ret, i;
  667. base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
  668. if (base_hwirq == gic_shared_intrs)
  669. return -ENOMEM;
  670. /* check that we have enough space */
  671. for (i = base_hwirq; i < nr_irqs; i++) {
  672. if (!test_bit(i, ipi_available))
  673. return -EBUSY;
  674. }
  675. bitmap_clear(ipi_available, base_hwirq, nr_irqs);
  676. /* map the hwirq for each cpu consecutively */
  677. i = 0;
  678. for_each_cpu(cpu, ipimask) {
  679. hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
  680. ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
  681. &gic_edge_irq_controller,
  682. NULL);
  683. if (ret)
  684. goto error;
  685. ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
  686. &gic_edge_irq_controller,
  687. NULL);
  688. if (ret)
  689. goto error;
  690. ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
  691. if (ret)
  692. goto error;
  693. ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
  694. if (ret)
  695. goto error;
  696. i++;
  697. }
  698. return 0;
  699. error:
  700. bitmap_set(ipi_available, base_hwirq, nr_irqs);
  701. return ret;
  702. }
  703. void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
  704. unsigned int nr_irqs)
  705. {
  706. irq_hw_number_t base_hwirq;
  707. struct irq_data *data;
  708. data = irq_get_irq_data(virq);
  709. if (!data)
  710. return;
  711. base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
  712. bitmap_set(ipi_available, base_hwirq, nr_irqs);
  713. }
  714. int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
  715. enum irq_domain_bus_token bus_token)
  716. {
  717. bool is_ipi;
  718. switch (bus_token) {
  719. case DOMAIN_BUS_IPI:
  720. is_ipi = d->bus_token == bus_token;
  721. return (!node || to_of_node(d->fwnode) == node) && is_ipi;
  722. break;
  723. default:
  724. return 0;
  725. }
  726. }
  727. static struct irq_domain_ops gic_ipi_domain_ops = {
  728. .xlate = gic_ipi_domain_xlate,
  729. .alloc = gic_ipi_domain_alloc,
  730. .free = gic_ipi_domain_free,
  731. .match = gic_ipi_domain_match,
  732. };
  733. static void __init __gic_init(unsigned long gic_base_addr,
  734. unsigned long gic_addrspace_size,
  735. unsigned int cpu_vec, unsigned int irqbase,
  736. struct device_node *node)
  737. {
  738. unsigned int gicconfig, cpu;
  739. unsigned int v[2];
  740. __gic_base_addr = gic_base_addr;
  741. gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);
  742. gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
  743. gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
  744. GIC_SH_CONFIG_NUMINTRS_SHF;
  745. gic_shared_intrs = ((gic_shared_intrs + 1) * 8);
  746. gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
  747. GIC_SH_CONFIG_NUMVPES_SHF;
  748. gic_vpes = gic_vpes + 1;
  749. if (cpu_has_veic) {
  750. /* Set EIC mode for all VPEs */
  751. for_each_present_cpu(cpu) {
  752. gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
  753. mips_cm_vp_id(cpu));
  754. gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
  755. GIC_VPE_CTL_EIC_MODE_MSK);
  756. }
  757. /* Always use vector 1 in EIC mode */
  758. gic_cpu_pin = 0;
  759. timer_cpu_pin = gic_cpu_pin;
  760. set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
  761. __gic_irq_dispatch);
  762. } else {
  763. gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
  764. irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
  765. gic_irq_dispatch);
  766. /*
  767. * With the CMP implementation of SMP (deprecated), other CPUs
  768. * are started by the bootloader and put into a timer based
  769. * waiting poll loop. We must not re-route those CPU's local
  770. * timer interrupts as the wait instruction will never finish,
  771. * so just handle whatever CPU interrupt it is routed to by
  772. * default.
  773. *
  774. * This workaround should be removed when CMP support is
  775. * dropped.
  776. */
  777. if (IS_ENABLED(CONFIG_MIPS_CMP) &&
  778. gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
  779. timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
  780. GIC_VPE_TIMER_MAP)) &
  781. GIC_MAP_MSK;
  782. irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
  783. GIC_CPU_PIN_OFFSET +
  784. timer_cpu_pin,
  785. gic_irq_dispatch);
  786. } else {
  787. timer_cpu_pin = gic_cpu_pin;
  788. }
  789. }
  790. gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
  791. gic_shared_intrs, irqbase,
  792. &gic_irq_domain_ops, NULL);
  793. if (!gic_irq_domain)
  794. panic("Failed to add GIC IRQ domain");
  795. gic_irq_domain->name = "mips-gic-irq";
  796. gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
  797. IRQ_DOMAIN_FLAG_IPI_PER_CPU,
  798. GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
  799. node, &gic_ipi_domain_ops, NULL);
  800. if (!gic_ipi_domain)
  801. panic("Failed to add GIC IPI domain");
  802. gic_ipi_domain->name = "mips-gic-ipi";
  803. gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;
  804. if (node &&
  805. !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
  806. bitmap_set(ipi_resrv, v[0], v[1]);
  807. } else {
  808. /* Make the last 2 * gic_vpes available for IPIs */
  809. bitmap_set(ipi_resrv,
  810. gic_shared_intrs - 2 * gic_vpes,
  811. 2 * gic_vpes);
  812. }
  813. bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
  814. gic_basic_init();
  815. }
  816. void __init gic_init(unsigned long gic_base_addr,
  817. unsigned long gic_addrspace_size,
  818. unsigned int cpu_vec, unsigned int irqbase)
  819. {
  820. __gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
  821. }
  822. static int __init gic_of_init(struct device_node *node,
  823. struct device_node *parent)
  824. {
  825. struct resource res;
  826. unsigned int cpu_vec, i = 0, reserved = 0;
  827. phys_addr_t gic_base;
  828. size_t gic_len;
  829. /* Find the first available CPU vector. */
  830. while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
  831. i++, &cpu_vec))
  832. reserved |= BIT(cpu_vec);
  833. for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
  834. if (!(reserved & BIT(cpu_vec)))
  835. break;
  836. }
  837. if (cpu_vec == 8) {
  838. pr_err("No CPU vectors available for GIC\n");
  839. return -ENODEV;
  840. }
  841. if (of_address_to_resource(node, 0, &res)) {
  842. /*
  843. * Probe the CM for the GIC base address if not specified
  844. * in the device-tree.
  845. */
  846. if (mips_cm_present()) {
  847. gic_base = read_gcr_gic_base() &
  848. ~CM_GCR_GIC_BASE_GICEN_MSK;
  849. gic_len = 0x20000;
  850. } else {
  851. pr_err("Failed to get GIC memory range\n");
  852. return -ENODEV;
  853. }
  854. } else {
  855. gic_base = res.start;
  856. gic_len = resource_size(&res);
  857. }
  858. if (mips_cm_present())
  859. write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
  860. gic_present = true;
  861. __gic_init(gic_base, gic_len, cpu_vec, 0, node);
  862. return 0;
  863. }
  864. IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);