irq.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/uaccess.h>
#include <hv/drv_pcie_rc_intf.h>
#include <arch/spr_def.h>
#include <asm/traps.h>
#include <linux/perf_event.h>

/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
#define IS_HW_CLEARED 1

/*
 * The set of interrupts we enable for arch_local_irq_enable().
 * This is initialized with just a single sentinel interrupt that the
 * kernel doesn't actually use.  During kernel init, interrupts are
 * added as the kernel gets prepared to support them.
 * NOTE: we could probably initialize them all statically up front.
 */
DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
        INITIAL_INTERRUPTS_ENABLED;
EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);
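
/*
 * A bit is typically added to this mask via arch_local_irq_unmask(),
 * as setup_irq_regs() below does for INT_IPI_K; once the bit is set,
 * the interrupt is unmasked whenever arch_local_irq_enable() runs.
 */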

/* Define per-tile device interrupt statistics state. */
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

/*
 * Define the per-tile irq disable mask; the hardware/HV only has a
 * single mask that we use to implement both masking and disabling.
 */
static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
        ____cacheline_internodealigned_in_smp;

/*
 * Per-tile IRQ nesting depth.  Used to make sure we unmask newly
 * enabled IRQs before exiting the outermost interrupt.
 */
static DEFINE_PER_CPU(int, irq_depth);

#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
#else
/* Use HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
#endif
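
/*
 * Illustrative sketch (not taken from a caller in this file): to
 * briefly suppress device irq 5 on this tile, a caller could write
 *
 *      mask_irqs(1UL << 5);
 *      ... critical section ...
 *      unmask_irqs(1UL << 5);
 *
 * which compiles to two mtspr instructions on TILE-Gx and two
 * hypervisor calls on TILEPro.
 */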

/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILEPro, and IPI hardware on TILE-Gx.
 * Entered with interrupts disabled.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
        int depth = __get_cpu_var(irq_depth)++;
        unsigned long original_irqs;
        unsigned long remaining_irqs;
        struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
        /*
         * Pending interrupts are listed in an SPR.  We might be
         * nested, so be sure to only handle irqs that weren't already
         * masked by a previous interrupt.  Then, mask out the ones
         * we're going to handle.
         */
        unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
        original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
        __insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
        /*
         * Hypervisor performs the equivalent of the Gx code above and
         * then puts the pending interrupt mask into a system save reg
         * for us to find.
         */
        original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
        remaining_irqs = original_irqs;

        /* Track time spent here in an interrupt context. */
        old_regs = set_irq_regs(regs);
        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: less than 1/8th stack free? */
        {
                long sp = stack_pointer - (long) current_thread_info();
                if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
                        pr_emerg("tile_dev_intr: stack overflow: %ld\n",
                                 sp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif
        while (remaining_irqs) {
                unsigned long irq = __ffs(remaining_irqs);
                remaining_irqs &= ~(1UL << irq);

                /* Count device irqs; Linux IPIs are counted elsewhere. */
                if (irq != IRQ_RESCHEDULE)
                        __get_cpu_var(irq_stat).irq_dev_intr_count++;

                generic_handle_irq(irq);
        }

        /*
         * If we weren't nested, turn on all enabled interrupts,
         * including any that were reenabled during interrupt
         * handling.
         */
        if (depth == 0)
                unmask_irqs(~__get_cpu_var(irq_disable_mask));

        __get_cpu_var(irq_depth)--;

        /*
         * Track time spent against the current process again and
         * process any softirqs if they are waiting.
         */
        irq_exit();
        set_irq_regs(old_regs);
}
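
/*
 * Worked example (illustrative): if irqs 3 and 5 are pending and
 * unmasked on entry, original_irqs has bits 3 and 5 set; both are
 * masked up front, then dispatched in turn via generic_handle_irq().
 * A nested tile_dev_intr() sees depth > 0 and leaves its irqs masked
 * on exit; only the outermost invocation (depth == 0) unmasks
 * everything not in irq_disable_mask.
 */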

/*
 * Remove an irq from the disabled mask.  If we're in an interrupt
 * context, defer enabling the HW interrupt until we leave.
 */
static void tile_irq_chip_enable(struct irq_data *d)
{
        get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
        if (__get_cpu_var(irq_depth) == 0)
                unmask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
}

/*
 * Add an irq to the disabled mask.  We disable the HW interrupt
 * immediately so that there's no possibility of it firing.  If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
static void tile_irq_chip_disable(struct irq_data *d)
{
        get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
        mask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
}

/* Mask an interrupt. */
static void tile_irq_chip_mask(struct irq_data *d)
{
        mask_irqs(1UL << d->irq);
}

/* Unmask an interrupt. */
static void tile_irq_chip_unmask(struct irq_data *d)
{
        unmask_irqs(1UL << d->irq);
}
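
/*
 * Note the division of labor above: irq_enable/irq_disable update the
 * logical irq_disable_mask (honored on interrupt exit and in irq_eoi
 * below), while irq_mask/irq_unmask touch only the single hardware/HV
 * mask on behalf of the generic flow handlers.
 */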

/*
 * Clear an interrupt before processing it so that any new assertions
 * will trigger another irq.
 */
static void tile_irq_chip_ack(struct irq_data *d)
{
        if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
                clear_irqs(1UL << d->irq);
}

/*
 * For per-cpu interrupts, we need to avoid unmasking any interrupts
 * that we disabled via disable_percpu_irq().
 */
static void tile_irq_chip_eoi(struct irq_data *d)
{
        if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
                unmask_irqs(1UL << d->irq);
}

static struct irq_chip tile_irq_chip = {
        .name = "tile_irq_chip",
        .irq_enable = tile_irq_chip_enable,
        .irq_disable = tile_irq_chip_disable,
        .irq_ack = tile_irq_chip_ack,
        .irq_eoi = tile_irq_chip_eoi,
        .irq_mask = tile_irq_chip_mask,
        .irq_unmask = tile_irq_chip_unmask,
};
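
/*
 * For context (describing the generic irq core of this era, not code
 * in this file): handle_level_irq() masks and acks an irq via the
 * callbacks above before running its handlers and unmasks it
 * afterwards, while handle_percpu_irq() calls irq_ack before and
 * irq_eoi after.  That is why irq_eoi rechecks irq_disable_mask
 * before unmasking.
 */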

void __init init_IRQ(void)
{
        ipi_init();
}

void setup_irq_regs(void)
{
        /* Enable interrupt delivery. */
        unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
        arch_local_irq_unmask(INT_IPI_K);
#endif
}

void tile_irq_activate(unsigned int irq, int tile_irq_type)
{
        /*
         * We use handle_level_irq() by default because the pending
         * interrupt vector (whether modeled by the HV on TILEPro or
         * implemented in hardware on TILE-Gx) has level-style
         * semantics for each bit.  An interrupt fires whenever a bit
         * is high, not just at edges.
         */
        irq_flow_handler_t handle = handle_level_irq;
        if (tile_irq_type == TILE_IRQ_PERCPU)
                handle = handle_percpu_irq;
        irq_set_chip_and_handler(irq, &tile_irq_chip, handle);

        /*
         * Flag interrupts that are hardware-cleared so that ack()
         * won't clear them.
         */
        if (tile_irq_type == TILE_IRQ_HW_CLEAR)
                irq_set_chip_data(irq, (void *)IS_HW_CLEARED);
}
EXPORT_SYMBOL(tile_irq_activate);
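
/*
 * Usage sketch (hypothetical driver; MY_DEV_IRQ and my_dev_handler
 * are illustrative names, not defined in this tree): a driver
 * activates its vector before requesting it:
 *
 *      tile_irq_activate(MY_DEV_IRQ, TILE_IRQ_PERCPU);
 *      if (request_irq(MY_DEV_IRQ, my_dev_handler, 0, "my_dev", NULL))
 *              pr_err("my_dev: request_irq failed\n");
 */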

void ack_bad_irq(unsigned int irq)
{
        pr_err("unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_PERF_EVENTS
        int i;

        seq_printf(p, "%*s: ", prec, "PMI");

        for_each_online_cpu(i)
                seq_printf(p, "%10llu ", per_cpu(perf_irqs, i));
        seq_puts(p, " perf_events\n");
#endif
        return 0;
}

#if CHIP_HAS_IPI()
int arch_setup_hwirq(unsigned int irq, int node)
{
        return irq >= NR_IRQS ? -EINVAL : 0;
}

void arch_teardown_hwirq(unsigned int irq) { }
#endif