hw_irq.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
#define PACA_IRQ_HMI		0x20

/*
 * flags for paca->soft_enabled
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1

#endif /* CONFIG_PPC64 */
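/*
 * Illustrative sketch (not part of this header) of how the lazy-masking
 * state is consumed: a masked interrupt source is latched in
 * paca->irq_happened instead of being handled, roughly:
 *
 *	if (local_paca->soft_enabled == IRQS_DISABLED) {
 *		local_paca->irq_happened |= PACA_IRQ_DEC;
 *		return;		// replayed when interrupts are unmasked
 *	}
 */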
#ifndef __ASSEMBLY__

extern void replay_system_reset(void);
extern void __replay_interrupt(unsigned int vector);

extern void timer_interrupt(struct pt_regs *);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long soft_enabled_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, soft_enabled)));

	return flags;
}
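/*
 * r13 holds the PACA pointer in the 64-bit kernel, so the "lbz"
 * above reads local_paca->soft_enabled directly; a pure read needs
 * no "memory" clobber.
 */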
/*
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->soft_enabled.
 */
static inline notrace void soft_enabled_set(unsigned long mask)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * mask must always include LINUX bit if any are set, and
	 * interrupts don't get replayed until the Linux interrupt is
	 * unmasked. This could be changed to replay partial unmasks
	 * in future, which would allow Linux masks to nest inside
	 * other masks, among other things. For now, be very dumb and
	 * simple.
	 */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif
	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, soft_enabled))
		: "memory");
}
static inline notrace unsigned long soft_enabled_set_return(unsigned long mask)
{
	unsigned long flags;

#ifdef CONFIG_TRACE_IRQFLAGS
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif
	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)
		: "i" (offsetof(struct paca_struct, soft_enabled)),
		  "r" (mask)
		: "memory");

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return soft_enabled_return();
}

static inline void arch_local_irq_disable(void)
{
	soft_enabled_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return soft_enabled_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
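/*
 * Typical use of the irqflags API above (generic kernel pattern, shown
 * for illustration): on 64-bit these calls only flip the soft-enabled
 * byte; MSR[EE] stays on and masked interrupts are latched for replay
 * by arch_local_irq_restore().
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	// critical section: interrupts are latched, not taken
 *	arch_local_irq_restore(flags);
 */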
#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
#endif
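/*
 * wrteei writes only MSR[EE] on BookE; on Book3S, __mtmsrd(..., 1)
 * uses mtmsrd with L=1, which updates just the EE and RI bits. Both
 * variants therefore avoid a full MSR read-modify-write.
 */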
#define hard_irq_disable()	do {				\
	unsigned long flags;					\
	__hard_irq_disable();					\
	flags = soft_enabled_set_return(IRQS_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;		\
	if (!arch_irqs_disabled_flags(flags))			\
		trace_hardirqs_off();				\
} while(0)

static inline bool lazy_irq_pending(void)
{
	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
}
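/*
 * Example (sketch of the common idle-entry pattern, not a caller
 * defined by this header): check for latched interrupts after hard
 * disabling, so the CPU does not go to sleep with work pending.
 *
 *	hard_irq_disable();
 *	if (lazy_irq_pending())
 *		return;		// replay instead of entering idle
 */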
/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts when soft-disabled after having
 * cleared the source of the interrupt.
 */
static inline void may_hard_irq_enable(void)
{
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	if (!(get_paca()->irq_happened & PACA_IRQ_EE))
		__hard_irq_enable();
}
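/*
 * If a masked external interrupt (PACA_IRQ_EE) is still pending,
 * MSR[EE] is left off: re-enabling would take the same interrupt
 * again before the soft-disabled section has a chance to replay it.
 */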
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

#else /* CONFIG_PPC64 */

#define SET_MSR_EE(x)	mtmsr(x)

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
#if defined(CONFIG_BOOKE)
	asm volatile("wrtee %0" : : "r" (flags) : "memory");
#else
	mtmsr(flags);
#endif
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EID);
#else
	SET_MSR_EE(flags & ~MSR_EE);
#endif
	return flags;
}

static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EID);
#else
	arch_local_irq_save();
#endif
}

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 1" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EIE);
#else
	unsigned long msr = mfmsr();

	SET_MSR_EE(msr | MSR_EE);
#endif
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
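/*
 * On 32-bit the flags word is the raw MSR, so arch_irqs_disabled_flags()
 * tests MSR[EE] directly. The same generic pattern applies, but here it
 * really hard-disables (illustration):
 *
 *	unsigned long flags = arch_local_irq_save();
 *	// MSR[EE] is now clear: interrupts are truly disabled
 *	arch_local_irq_restore(flags);
 */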
#define hard_irq_disable()	arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now? --BenH.
 */
struct irq_chip;

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */