/* hw_irq.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
  4. */
  5. #ifndef _ASM_POWERPC_HW_IRQ_H
  6. #define _ASM_POWERPC_HW_IRQ_H
  7. #ifdef __KERNEL__
  8. #include <linux/errno.h>
  9. #include <linux/compiler.h>
  10. #include <asm/ptrace.h>
  11. #include <asm/processor.h>
  12. #ifdef CONFIG_PPC64
  13. /*
  14. * PACA flags in paca->irq_happened.
  15. *
* These bits are set when interrupts occur while soft-disabled
  17. * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
  18. * is set whenever we manually hard disable.
  19. */
  20. #define PACA_IRQ_HARD_DIS 0x01
  21. #define PACA_IRQ_DBELL 0x02
  22. #define PACA_IRQ_EE 0x04
  23. #define PACA_IRQ_DEC 0x08 /* Or FIT */
  24. #define PACA_IRQ_EE_EDGE 0x10 /* BookE only */
  25. #define PACA_IRQ_HMI 0x20
  26. #define PACA_IRQ_PMI 0x40
  27. /*
  28. * flags for paca->irq_soft_mask
  29. */
  30. #define IRQS_ENABLED 0
  31. #define IRQS_DISABLED 1 /* local_irq_disable() interrupts */
  32. #define IRQS_PMI_DISABLED 2
  33. #define IRQS_ALL_DISABLED (IRQS_DISABLED | IRQS_PMI_DISABLED)
  34. #endif /* CONFIG_PPC64 */
  35. #ifndef __ASSEMBLY__
  36. extern void replay_system_reset(void);
  37. extern void __replay_interrupt(unsigned int vector);
  38. extern void timer_interrupt(struct pt_regs *);
  39. extern void performance_monitor_exception(struct pt_regs *regs);
  40. extern void WatchdogException(struct pt_regs *regs);
  41. extern void unknown_exception(struct pt_regs *regs);
  42. #ifdef CONFIG_PPC64
  43. #include <asm/paca.h>
/*
 * Read the current per-CPU soft-interrupt mask (paca->irq_soft_mask).
 *
 * On 64-bit, r13 permanently holds the PACA pointer, so a single lbz
 * from the irq_soft_mask offset yields the IRQS_* mask byte.  notrace
 * because the ftrace machinery itself uses the irqflags accessors.
 */
static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}
/*
 * Store a new value into paca->irq_soft_mask.
 *
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask.
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * The irq mask must always include the STD bit if any are set,
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif
	/* Single byte store to paca->irq_soft_mask via the r13 PACA pointer. */
	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}
/*
 * Set paca->irq_soft_mask to @mask and return the previous value.
 * Done as an adjacent lbz/stb pair on the PACA; the "memory" clobber
 * provides the compiler barrier for the critical section.
 */
static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags;

#ifdef CONFIG_TRACE_IRQFLAGS
	/* See irq_soft_mask_set(): any mask must include the STD bit. */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif
	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

	return flags;
}
  98. static inline unsigned long arch_local_save_flags(void)
  99. {
  100. return irq_soft_mask_return();
  101. }
  102. static inline void arch_local_irq_disable(void)
  103. {
  104. irq_soft_mask_set(IRQS_DISABLED);
  105. }
  106. extern void arch_local_irq_restore(unsigned long);
  107. static inline void arch_local_irq_enable(void)
  108. {
  109. arch_local_irq_restore(IRQS_ENABLED);
  110. }
  111. static inline unsigned long arch_local_irq_save(void)
  112. {
  113. return irq_soft_mask_set_return(IRQS_DISABLED);
  114. }
  115. static inline bool arch_irqs_disabled_flags(unsigned long flags)
  116. {
  117. return flags & IRQS_DISABLED;
  118. }
  119. static inline bool arch_irqs_disabled(void)
  120. {
  121. return arch_irqs_disabled_flags(arch_local_save_flags());
  122. }
#ifdef CONFIG_PPC_BOOK3E
/* BookE has a dedicated instruction (wrteei) to flip MSR[EE] directly. */
#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
#else
/* Book3S: rewrite the MSR from the cached kernel MSR image (mtmsrd L=1). */
#define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
#endif
/*
 * Fully disable interrupts: hard-disable MSR[EE] first, then mark both
 * standard and PMI interrupts soft-masked, and record the manual hard
 * disable in irq_happened so the replay logic knows EE must be turned
 * back on.  trace_hardirqs_off() is only invoked on an actual
 * enabled -> disabled transition to keep lockdep's view consistent.
 */
#define hard_irq_disable() do { \
	unsigned long flags; \
	__hard_irq_disable(); \
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED); \
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
	if (!arch_irqs_disabled_flags(flags)) \
		trace_hardirqs_off(); \
} while(0)
  138. static inline bool lazy_irq_pending(void)
  139. {
  140. return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
  141. }
  142. /*
  143. * This is called by asynchronous interrupts to conditionally
  144. * re-enable hard interrupts when soft-disabled after having
  145. * cleared the source of the interrupt
  146. */
  147. static inline void may_hard_irq_enable(void)
  148. {
  149. get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
  150. if (!(get_paca()->irq_happened & PACA_IRQ_EE))
  151. __hard_irq_enable();
  152. }
  153. static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
  154. {
  155. return (regs->softe & IRQS_DISABLED);
  156. }
  157. extern bool prep_irq_for_idle(void);
  158. extern bool prep_irq_for_idle_irqsoff(void);
  159. extern void irq_set_pending_from_srr1(unsigned long srr1);
  160. #define fini_irq_for_idle_irqsoff() trace_hardirqs_off();
  161. extern void force_external_irq_replay(void);
  162. #else /* CONFIG_PPC64 */
  163. #define SET_MSR_EE(x) mtmsr(x)
  164. static inline unsigned long arch_local_save_flags(void)
  165. {
  166. return mfmsr();
  167. }
/*
 * Restore a flags word previously returned by arch_local_irq_save() /
 * arch_local_save_flags().  BookE uses wrtee, which transfers only the
 * EE bit from the source register; other parts rewrite the whole MSR.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
#if defined(CONFIG_BOOKE)
	asm volatile("wrtee %0" : : "r" (flags) : "memory");
#else
	mtmsr(flags);
#endif
}
/*
 * Save the current MSR and disable external interrupts.
 * BookE: wrteei 0 clears EE directly; 8xx: writing the EID special
 * register disables interrupts; classic parts: clear EE via mtmsr.
 */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EID);
#else
	SET_MSR_EE(flags & ~MSR_EE);
#endif
	return flags;
}
/*
 * Disable external interrupts without returning the old state.
 * The classic-PPC fallback reuses arch_local_irq_save() and simply
 * discards the returned flags.
 */
static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EID);
#else
	arch_local_irq_save();
#endif
}
/*
 * Enable external interrupts.  BookE: wrteei 1 sets EE directly;
 * 8xx: writing the EIE special register enables interrupts; classic
 * parts: read-modify-write the MSR to set EE.
 */
static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 1" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EIE);
#else
	unsigned long msr = mfmsr();
	SET_MSR_EE(msr | MSR_EE);
#endif
}
  209. static inline bool arch_irqs_disabled_flags(unsigned long flags)
  210. {
  211. return (flags & MSR_EE) == 0;
  212. }
  213. static inline bool arch_irqs_disabled(void)
  214. {
  215. return arch_irqs_disabled_flags(arch_local_save_flags());
  216. }
  217. #define hard_irq_disable() arch_local_irq_disable()
  218. static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
  219. {
  220. return !(regs->msr & MSR_EE);
  221. }
  222. static inline void may_hard_irq_enable(void) { }
  223. #endif /* CONFIG_PPC64 */
  224. #define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
  225. /*
  226. * interrupt-retrigger: should we handle this via lost interrupts and IPIs
  227. * or should we not care like we do now ? --BenH.
  228. */
  229. struct irq_chip;
  230. #endif /* __ASSEMBLY__ */
  231. #endif /* __KERNEL__ */
  232. #endif /* _ASM_POWERPC_HW_IRQ_H */