irq.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/uaccess.h>
#include <hv/drv_pcie_rc_intf.h>
#include <arch/spr_def.h>
#include <asm/traps.h>
#include <linux/perf_event.h>

/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
#define IS_HW_CLEARED 1

/*
 * The set of interrupts we enable for arch_local_irq_enable().
 * This is initialized with a single sentinel interrupt, one the
 * kernel doesn't actually use.  During kernel init, interrupts are
 * added to this set as the kernel becomes prepared to support them.
 * NOTE: we could probably initialize them all statically up front.
 */
DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
        INITIAL_INTERRUPTS_ENABLED;
EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);

/* Define per-tile device interrupt statistics state. */
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

/*
 * Define per-tile irq disable mask; the hardware/HV only has a single
 * mask that we use to implement both masking and disabling.
 */
static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
        ____cacheline_internodealigned_in_smp;

/*
 * Per-tile IRQ nesting depth.  Used to make sure we enable newly
 * enabled IRQs before exiting the outermost interrupt.
 */
static DEFINE_PER_CPU(int, irq_depth);

/* State for allocating IRQs on Gx. */
#if CHIP_HAS_IPI()
static unsigned long available_irqs = ((1UL << NR_IRQS) - 1) &
        (~(1UL << IRQ_RESCHEDULE));
static DEFINE_SPINLOCK(available_irqs_lock);
#endif

#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
#else
/* Use HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
#endif
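
/*
 * Usage note (added for clarity; not original code): all three
 * helpers take a bitmask of interrupt numbers rather than a single
 * irq number, so callers shift first when targeting one interrupt:
 *
 *      mask_irqs(1UL << irq);          targets only 'irq'
 *      unmask_irqs(~0UL);              unmasks every interrupt
 */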

/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILEPro, and IPI hardware on TILE-Gx.
 * Entered with interrupts disabled.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
        int depth = __get_cpu_var(irq_depth)++;
        unsigned long original_irqs;
        unsigned long remaining_irqs;
        struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
        /*
         * Pending interrupts are listed in an SPR.  We might be
         * nested, so be sure to only handle irqs that weren't already
         * masked by a previous interrupt.  Then, mask out the ones
         * we're going to handle.
         */
        unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
        original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
        __insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
        /*
         * Hypervisor performs the equivalent of the Gx code above and
         * then puts the pending interrupt mask into a system save reg
         * for us to find.
         */
        original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
        remaining_irqs = original_irqs;

        /* Track time spent here in an interrupt context. */
        old_regs = set_irq_regs(regs);
        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: less than 1/8th stack free? */
        {
                long sp = stack_pointer - (long) current_thread_info();
                if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
                        pr_emerg("tile_dev_intr: stack overflow: %ld\n",
                                 sp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif
        while (remaining_irqs) {
                unsigned long irq = __ffs(remaining_irqs);
                remaining_irqs &= ~(1UL << irq);

                /* Count device irqs; Linux IPIs are counted elsewhere. */
                if (irq != IRQ_RESCHEDULE)
                        __get_cpu_var(irq_stat).irq_dev_intr_count++;

                generic_handle_irq(irq);
        }

        /*
         * If we weren't nested, turn on all enabled interrupts,
         * including any that were reenabled during interrupt
         * handling.
         */
        if (depth == 0)
                unmask_irqs(~__get_cpu_var(irq_disable_mask));

        __get_cpu_var(irq_depth)--;

        /*
         * Track time spent against the current process again and
         * process any softirqs if they are waiting.
         */
        irq_exit();
        set_irq_regs(old_regs);
}

/*
 * Remove an irq from the disabled mask.  If we're in an interrupt
 * context, defer enabling the HW interrupt until we leave.
 */
static void tile_irq_chip_enable(struct irq_data *d)
{
        get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
        if (__get_cpu_var(irq_depth) == 0)
                unmask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
}

/*
 * Add an irq to the disabled mask.  We disable the HW interrupt
 * immediately so that there's no possibility of it firing.  If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
static void tile_irq_chip_disable(struct irq_data *d)
{
        get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
        mask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
}

/* Mask an interrupt. */
static void tile_irq_chip_mask(struct irq_data *d)
{
        mask_irqs(1UL << d->irq);
}

/* Unmask an interrupt. */
static void tile_irq_chip_unmask(struct irq_data *d)
{
        unmask_irqs(1UL << d->irq);
}

/*
 * Clear an interrupt before processing it so that any new assertions
 * will trigger another irq.
 */
static void tile_irq_chip_ack(struct irq_data *d)
{
        if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
                clear_irqs(1UL << d->irq);
}

/*
 * For per-cpu interrupts, we need to avoid unmasking any interrupts
 * that we disabled via disable_percpu_irq().
 */
static void tile_irq_chip_eoi(struct irq_data *d)
{
        if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
                unmask_irqs(1UL << d->irq);
}

static struct irq_chip tile_irq_chip = {
        .name = "tile_irq_chip",
        .irq_enable = tile_irq_chip_enable,
        .irq_disable = tile_irq_chip_disable,
        .irq_ack = tile_irq_chip_ack,
        .irq_eoi = tile_irq_chip_eoi,
        .irq_mask = tile_irq_chip_mask,
        .irq_unmask = tile_irq_chip_unmask,
};
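
/*
 * Orientation note (summary of generic-irq flow-handler behavior,
 * added for clarity; not original code): for the default level-flow
 * interrupts, handle_level_irq() invokes ->irq_mask() and ->irq_ack()
 * before running the handler and ->irq_unmask() afterwards, while
 * handle_percpu_irq() invokes ->irq_ack() before the handler and
 * ->irq_eoi() after it.  That is why the chip above fills in both the
 * mask/unmask/ack and the eoi callbacks.
 */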

void __init init_IRQ(void)
{
        ipi_init();
}

void setup_irq_regs(void)
{
        /* Enable interrupt delivery. */
        unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
        arch_local_irq_unmask(INT_IPI_K);
#endif
}

void tile_irq_activate(unsigned int irq, int tile_irq_type)
{
        /*
         * We use handle_level_irq() by default because the pending
         * interrupt vector (whether modeled by the HV on TILEPro or
         * implemented in hardware on TILE-Gx) has level-style
         * semantics for each bit.  An interrupt fires whenever a bit
         * is high, not just at edges.
         */
        irq_flow_handler_t handle = handle_level_irq;
        if (tile_irq_type == TILE_IRQ_PERCPU)
                handle = handle_percpu_irq;
        irq_set_chip_and_handler(irq, &tile_irq_chip, handle);

        /*
         * Flag interrupts that are hardware-cleared so that ack()
         * won't clear them.
         */
        if (tile_irq_type == TILE_IRQ_HW_CLEAR)
                irq_set_chip_data(irq, (void *)IS_HW_CLEARED);
}
EXPORT_SYMBOL(tile_irq_activate);
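
/*
 * Usage sketch (illustration added for clarity; not original code,
 * and the "my_dev" names are hypothetical).  A driver activates the
 * irq and then registers a handler through the generic irq layer:
 *
 *      tile_irq_activate(irq, TILE_IRQ_PERCPU);
 *      if (request_irq(irq, my_dev_handler, 0, "my_dev", my_dev))
 *              pr_err("my_dev: request_irq(%u) failed\n", irq);
 */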

void ack_bad_irq(unsigned int irq)
{
        pr_err("unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_PERF_EVENTS
        int i;

        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(i)
                seq_printf(p, "%10llu ", per_cpu(perf_irqs, i));
        seq_puts(p, "  perf_events\n");
#endif
        return 0;
}

/*
 * Generic, controller-independent functions:
 */
#if CHIP_HAS_IPI()
int create_irq(void)
{
        unsigned long flags;
        int result;

        spin_lock_irqsave(&available_irqs_lock, flags);
        if (available_irqs == 0)
                result = -ENOMEM;
        else {
                result = __ffs(available_irqs);
                available_irqs &= ~(1UL << result);
                dynamic_irq_init(result);
        }
        spin_unlock_irqrestore(&available_irqs_lock, flags);

        return result;
}
EXPORT_SYMBOL(create_irq);

void destroy_irq(unsigned int irq)
{
        unsigned long flags;

        spin_lock_irqsave(&available_irqs_lock, flags);
        available_irqs |= (1UL << irq);
        dynamic_irq_cleanup(irq);
        spin_unlock_irqrestore(&available_irqs_lock, flags);
}
EXPORT_SYMBOL(destroy_irq);
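
/*
 * Usage sketch (illustration added for clarity; not original code):
 * Gx-only callers pair these to allocate and release a dynamic irq:
 *
 *      int irq = create_irq();
 *      if (irq < 0)
 *              return irq;
 *      tile_irq_activate(irq, TILE_IRQ_HW_CLEAR);
 *      ...
 *      destroy_irq(irq);
 */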
#endif