/* drivers/irqchip/irq-mips-gic.c */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
  7. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
  8. */
  9. #include <linux/bitmap.h>
  10. #include <linux/clocksource.h>
  11. #include <linux/init.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/irq.h>
  14. #include <linux/irqchip/mips-gic.h>
  15. #include <linux/of_address.h>
  16. #include <linux/sched.h>
  17. #include <linux/smp.h>
  18. #include <asm/mips-cm.h>
  19. #include <asm/setup.h>
  20. #include <asm/traps.h>
  21. #include <dt-bindings/interrupt-controller/mips-gic.h>
  22. #include "irqchip.h"
unsigned int gic_present;

/* Per-CPU bitmap of shared interrupts routed to that CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static void __iomem *gic_base;		/* ioremap'd GIC register window */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);	/* serialises routing/mask register updates */
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;		/* number of shared interrupts, from GIC_SH_CONFIG */
static int gic_vpes;			/* number of VPEs, from GIC_SH_CONFIG */
static unsigned int gic_cpu_pin;	/* CPU interrupt pin the GIC is routed to */
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);
  36. static inline unsigned int gic_read(unsigned int reg)
  37. {
  38. return __raw_readl(gic_base + reg);
  39. }
  40. static inline void gic_write(unsigned int reg, unsigned int val)
  41. {
  42. __raw_writel(val, gic_base + reg);
  43. }
  44. static inline void gic_update_bits(unsigned int reg, unsigned int mask,
  45. unsigned int val)
  46. {
  47. unsigned int regval;
  48. regval = gic_read(reg);
  49. regval &= ~mask;
  50. regval |= val;
  51. gic_write(reg, regval);
  52. }
  53. static inline void gic_reset_mask(unsigned int intr)
  54. {
  55. gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
  56. 1 << GIC_INTR_BIT(intr));
  57. }
  58. static inline void gic_set_mask(unsigned int intr)
  59. {
  60. gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
  61. 1 << GIC_INTR_BIT(intr));
  62. }
  63. static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
  64. {
  65. gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
  66. GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
  67. pol << GIC_INTR_BIT(intr));
  68. }
  69. static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
  70. {
  71. gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
  72. GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
  73. trig << GIC_INTR_BIT(intr));
  74. }
  75. static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
  76. {
  77. gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
  78. 1 << GIC_INTR_BIT(intr),
  79. dual << GIC_INTR_BIT(intr));
  80. }
  81. static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
  82. {
  83. gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
  84. GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
  85. }
  86. static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
  87. {
  88. gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
  89. GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
  90. GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
  91. }
#ifdef CONFIG_CLKSRC_MIPS_GIC
/*
 * Read the GIC's 64-bit free-running counter.  The high word is re-read
 * until it is stable so a carry between the two 32-bit reads cannot
 * produce a torn value.
 */
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	do {
		hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}

/*
 * Counter width in bits: GIC_SH_CONFIG encodes it as (width - 32) / 4.
 */
unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);
	return bits;
}

/* Program this VPE's 64-bit compare register (two 32-bit halves). */
void gic_write_compare(cycle_t cnt)
{
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
		  (int)(cnt >> 32));
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
		  (int)(cnt & 0xffffffff));
}

/*
 * Program @cpu's compare register through the VPE-other alias.  IRQs are
 * disabled so nothing can re-target GIC_VPE_OTHER_ADDR between the
 * select and the writes.
 */
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
		  (int)(cnt >> 32));
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
		  (int)(cnt & 0xffffffff));

	local_irq_restore(flags);
}

/* Read back this VPE's 64-bit compare register. */
cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}
#endif
  137. static bool gic_local_irq_is_routable(int intr)
  138. {
  139. u32 vpe_ctl;
  140. /* All local interrupts are routable in EIC mode. */
  141. if (cpu_has_veic)
  142. return true;
  143. vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
  144. switch (intr) {
  145. case GIC_LOCAL_INT_TIMER:
  146. return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
  147. case GIC_LOCAL_INT_PERFCTR:
  148. return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
  149. case GIC_LOCAL_INT_FDC:
  150. return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
  151. case GIC_LOCAL_INT_SWINT0:
  152. case GIC_LOCAL_INT_SWINT1:
  153. return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
  154. default:
  155. return true;
  156. }
  157. }
  158. unsigned int gic_get_timer_pending(void)
  159. {
  160. unsigned int vpe_pending;
  161. vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
  162. return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
  163. }
  164. static void gic_bind_eic_interrupt(int irq, int set)
  165. {
  166. /* Convert irq vector # to hw int # */
  167. irq -= GIC_PIN_TO_VEC_OFFSET;
  168. /* Set irq to use shadow set */
  169. gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
  170. GIC_VPE_EIC_SS(irq), set);
  171. }
  172. void gic_send_ipi(unsigned int intr)
  173. {
  174. gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
  175. }
  176. int gic_get_c0_compare_int(void)
  177. {
  178. if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
  179. return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
  180. return irq_create_mapping(gic_irq_domain,
  181. GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
  182. }
/*
 * Linux IRQ number for the CP0 performance counter, or -1 when the
 * counter shares the timer interrupt and cannot be used separately.
 */
int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
/*
 * Find the lowest shared interrupt that is pending, unmasked and routed
 * to the current CPU.  Returns gic_shared_intrs when none qualifies.
 *
 * NOTE(review): each loop iteration reads only 32 bits into an
 * unsigned long bitmap element while stepping registers by 4 bytes;
 * this looks 32-bit-kernel specific — confirm for 64-bit builds.
 */
static unsigned int gic_get_int(void)
{
	unsigned int i;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	/* Snapshot the pending and mask registers, 32 bits at a time. */
	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += 0x4;
		intrmask_reg += 0x4;
	}

	/* Keep only interrupts that are enabled and owned by this CPU. */
	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	return find_first_bit(pending, gic_shared_intrs);
}
  215. static void gic_mask_irq(struct irq_data *d)
  216. {
  217. gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
  218. }
  219. static void gic_unmask_irq(struct irq_data *d)
  220. {
  221. gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
  222. }
  223. static void gic_ack_irq(struct irq_data *d)
  224. {
  225. unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
  226. gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
  227. }
/*
 * irq_chip .irq_set_type for shared interrupts: program the polarity,
 * trigger and dual-edge registers to match @type, then switch the
 * descriptor between the edge and level chip/handler pairs.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		/* Level/high is the default sense. */
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	/* Edge interrupts need the WEDGE ack in gic_ack_irq; level do not. */
	if (is_edge) {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_edge_irq_controller,
						   handle_edge_irq, NULL);
	} else {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_level_irq_controller,
						   handle_level_irq, NULL);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
  279. #ifdef CONFIG_SMP
  280. static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
  281. bool force)
  282. {
  283. unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
  284. cpumask_t tmp = CPU_MASK_NONE;
  285. unsigned long flags;
  286. int i;
  287. cpumask_and(&tmp, cpumask, cpu_online_mask);
  288. if (cpus_empty(tmp))
  289. return -EINVAL;
  290. /* Assumption : cpumask refers to a single CPU */
  291. spin_lock_irqsave(&gic_lock, flags);
  292. /* Re-route this IRQ */
  293. gic_map_to_vpe(irq, first_cpu(tmp));
  294. /* Update the pcpu_masks */
  295. for (i = 0; i < NR_CPUS; i++)
  296. clear_bit(irq, pcpu_masks[i].pcpu_mask);
  297. set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
  298. cpumask_copy(d->affinity, cpumask);
  299. spin_unlock_irqrestore(&gic_lock, flags);
  300. return IRQ_SET_MASK_OK_NOCOPY;
  301. }
  302. #endif
/* Chip for level-triggered shared interrupts (no explicit ack needed). */
static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};

/* Chip for edge-triggered shared interrupts: ack clears the WEDGE latch. */
static struct irq_chip gic_edge_irq_controller = {
	.name			= "MIPS GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};
/*
 * Find the lowest local interrupt that is both pending and unmasked on
 * this VPE.  Returns GIC_NUM_LOCAL_INTRS when none is pending.
 */
static unsigned int gic_get_local_int(void)
{
	unsigned long pending, masked;

	pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	return find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
}

/* irq_chip .irq_mask for local interrupts on the current VPE only. */
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

/* irq_chip .irq_unmask for local interrupts on the current VPE only. */
static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}
/* Chip for local interrupts handled through the percpu IRQ API. */
static struct irq_chip gic_local_irq_controller = {
	.name		= "MIPS GIC Local",
	.irq_mask	= gic_mask_local_irq,
	.irq_unmask	= gic_unmask_local_irq,
};
/*
 * Mask a local interrupt on every VPE via the VPE-other alias.  Used for
 * the timer/perfcounter interrupts, which are not managed as percpu IRQs
 * (see the HACK note in gic_local_irq_domain_map).
 */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/* Unmask a local interrupt on every VPE; counterpart of the above. */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/* Chip for local interrupts masked/unmasked on all VPEs at once. */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name		= "MIPS GIC Local",
	.irq_mask	= gic_mask_local_irq_all_vpes,
	.irq_unmask	= gic_unmask_local_irq_all_vpes,
};
/*
 * Dispatch every pending local interrupt, then every pending shared
 * interrupt, for the current CPU.  Each gic_get_*_int() helper returns
 * its bitmap size when nothing more is pending.
 */
static void __gic_irq_dispatch(void)
{
	unsigned int intr, virq;

	while ((intr = gic_get_local_int()) != GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		do_IRQ(virq);
	}

	while ((intr = gic_get_int()) != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		do_IRQ(virq);
	}
}

/* Chained-handler wrapper used when the GIC feeds a CPU interrupt pin. */
static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	__gic_irq_dispatch();
}
#ifdef CONFIG_MIPS_GIC_IPI
/* First shared interrupt used for resched / call IPIs, respectively. */
static int gic_resched_int_base;
static int gic_call_int_base;

/* Map a CPU number to its resched-IPI shared interrupt. */
unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	return gic_resched_int_base + cpu;
}

/* Map a CPU number to its call-IPI shared interrupt. */
unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	return gic_call_int_base + cpu;
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

/*
 * Route shared interrupt @intr exclusively to @cpu and install @action
 * as a rising-edge, per-cpu handler on its virq.
 */
static __init void gic_ipi_init_one(unsigned int intr, int cpu,
				    struct irqaction *action)
{
	int virq = irq_create_mapping(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	int i;

	gic_map_to_vpe(intr, cpu);
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[cpu].pcpu_mask);

	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

	irq_set_handler(virq, handle_percpu_irq);
	setup_irq(virq, action);
}

static __init void gic_ipi_init(void)
{
	int i;

	/* Use the last 2 * nr_cpu_ids shared interrupts as IPIs */
	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

	for (i = 0; i < nr_cpu_ids; i++) {
		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
	}
}
#else
static inline void gic_ipi_init(void)
{
}
#endif
/*
 * Put the GIC into a known state: shared interrupts default to
 * level/high and masked; routable local interrupts masked on every VPE.
 */
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	/* Mask all routable local interrupts, VPE by VPE. */
	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}
/*
 * irq_domain .map for local (per-VPE) interrupts: pick the chip/handler
 * pair, then route the interrupt to gic_cpu_pin on every VPE.
 * Returns -EPERM when the interrupt is not routable on this core, or
 * -EINVAL for an unknown local interrupt number.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) {
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
	} else {
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		/* Select VPE @i, then program its map register for @intr. */
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
			break;
		case GIC_LOCAL_INT_TIMER:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}
/*
 * irq_domain .map for shared interrupts: default to level handling,
 * route to the GIC CPU pin and to VPE 0 until an affinity is set.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	/* Map to VPE 0 by default */
	gic_map_to_vpe(intr, 0);
	set_bit(intr, pcpu_masks[0].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

/* irq_domain .map: hwirqs below GIC_NUM_LOCAL_INTRS are local. */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
		return gic_local_irq_domain_map(d, virq, hw);
	return gic_shared_irq_domain_map(d, virq, hw);
}
/*
 * DT translate: the 3-cell specifier is <type hwirq flags>, where type
 * is GIC_SHARED or GIC_LOCAL (dt-bindings/interrupt-controller/mips-gic.h).
 */
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
  570. static struct irq_domain_ops gic_irq_domain_ops = {
  571. .map = gic_irq_domain_map,
  572. .xlate = gic_irq_domain_xlate,
  573. };
  574. static void __init __gic_init(unsigned long gic_base_addr,
  575. unsigned long gic_addrspace_size,
  576. unsigned int cpu_vec, unsigned int irqbase,
  577. struct device_node *node)
  578. {
  579. unsigned int gicconfig;
  580. gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);
  581. gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
  582. gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
  583. GIC_SH_CONFIG_NUMINTRS_SHF;
  584. gic_shared_intrs = ((gic_shared_intrs + 1) * 8);
  585. gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
  586. GIC_SH_CONFIG_NUMVPES_SHF;
  587. gic_vpes = gic_vpes + 1;
  588. if (cpu_has_veic) {
  589. /* Always use vector 1 in EIC mode */
  590. gic_cpu_pin = 0;
  591. set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
  592. __gic_irq_dispatch);
  593. } else {
  594. gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
  595. irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
  596. gic_irq_dispatch);
  597. }
  598. gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
  599. gic_shared_intrs, irqbase,
  600. &gic_irq_domain_ops, NULL);
  601. if (!gic_irq_domain)
  602. panic("Failed to add GIC IRQ domain");
  603. gic_basic_init();
  604. gic_ipi_init();
  605. }
/* Legacy (non-device-tree) entry point for platform code. */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}
  612. static int __init gic_of_init(struct device_node *node,
  613. struct device_node *parent)
  614. {
  615. struct resource res;
  616. unsigned int cpu_vec, i = 0, reserved = 0;
  617. phys_addr_t gic_base;
  618. size_t gic_len;
  619. /* Find the first available CPU vector. */
  620. while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
  621. i++, &cpu_vec))
  622. reserved |= BIT(cpu_vec);
  623. for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
  624. if (!(reserved & BIT(cpu_vec)))
  625. break;
  626. }
  627. if (cpu_vec == 8) {
  628. pr_err("No CPU vectors available for GIC\n");
  629. return -ENODEV;
  630. }
  631. if (of_address_to_resource(node, 0, &res)) {
  632. /*
  633. * Probe the CM for the GIC base address if not specified
  634. * in the device-tree.
  635. */
  636. if (mips_cm_present()) {
  637. gic_base = read_gcr_gic_base() &
  638. ~CM_GCR_GIC_BASE_GICEN_MSK;
  639. gic_len = 0x20000;
  640. } else {
  641. pr_err("Failed to get GIC memory range\n");
  642. return -ENODEV;
  643. }
  644. } else {
  645. gic_base = res.start;
  646. gic_len = resource_size(&res);
  647. }
  648. if (mips_cm_present())
  649. write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
  650. gic_present = true;
  651. __gic_init(gic_base, gic_len, cpu_vec, 0, node);
  652. return 0;
  653. }
  654. IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);