  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
  4. */
  5. #ifndef _ASM_POWERPC_HW_IRQ_H
  6. #define _ASM_POWERPC_HW_IRQ_H
  7. #ifdef __KERNEL__
  8. #include <linux/errno.h>
  9. #include <linux/compiler.h>
  10. #include <asm/ptrace.h>
  11. #include <asm/processor.h>
#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01	/* MSR[EE] was hard-disabled by us */
#define PACA_IRQ_DBELL		0x02	/* doorbell pending */
#define PACA_IRQ_EE		0x04	/* external interrupt pending */
#define PACA_IRQ_DEC		0x08	/* Or FIT */
#define PACA_IRQ_EE_EDGE	0x10	/* BookE only */
#define PACA_IRQ_HMI		0x20	/* hypervisor maintenance interrupt */
#define PACA_IRQ_PMI		0x40	/* performance monitor interrupt */

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

/*
 * Flags for paca->irq_soft_mask. This is a bitmask: IRQS_DISABLED is
 * the "standard" local_irq_disable() mask and must be set whenever any
 * other mask bit is set (see irq_soft_mask_set()).
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)

#endif /* CONFIG_PPC64 */
  44. #ifndef __ASSEMBLY__
  45. extern void replay_system_reset(void);
  46. extern void __replay_interrupt(unsigned int vector);
  47. extern void timer_interrupt(struct pt_regs *);
  48. extern void timer_broadcast_interrupt(void);
  49. extern void performance_monitor_exception(struct pt_regs *regs);
  50. extern void WatchdogException(struct pt_regs *regs);
  51. extern void unknown_exception(struct pt_regs *regs);
  52. #ifdef CONFIG_PPC64
  53. #include <asm/paca.h>
/*
 * Read the current soft-interrupt mask (IRQS_* flags) from the PACA.
 *
 * The PACA is addressed directly through r13 (the fixed PACA pointer
 * register on 64-bit) with an immediate field offset, so no pointer
 * load is needed.  notrace keeps this out of function-trace
 * instrumentation.  Note: no "memory" clobber here — this is a pure
 * read and does not act as a compiler barrier.
 */
static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}
/*
 * Store a new value into paca->irq_soft_mask.
 *
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask.
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/*
	 * The irq mask must always include the STD bit if any are set:
	 * interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}
/*
 * Atomically (with respect to the compiler; interrupts that run here
 * restore the mask they find) swap in a new soft mask and return the
 * previous one.  Single lbz/stb pair against the PACA via r13; the
 * "memory" clobber provides the critical-section barrier as in
 * irq_soft_mask_set().
 */
static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags;

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/* New mask must include the standard bit if any bit is set. */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

	return flags;
}
/*
 * OR the given bits into paca->irq_soft_mask and return the previous
 * mask.  Read-modify-write is done entirely in the one asm block
 * (lbz/or/stb) so the compiler cannot move accesses across it.
 */
static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags, tmp;

	asm volatile(
		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
		: "=&r" (flags), "=r" (tmp)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/* Resulting mask (old | new) must include the standard bit. */
	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

	return flags;
}
  122. static inline unsigned long arch_local_save_flags(void)
  123. {
  124. return irq_soft_mask_return();
  125. }
/*
 * Soft-disable interrupts: mark IRQS_DISABLED in the PACA.  The MSR is
 * not touched; a hardware interrupt arriving now is recorded in
 * paca->irq_happened for later replay rather than being handled.
 */
static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}
  130. extern void arch_local_irq_restore(unsigned long);
/*
 * Soft-enable interrupts.  Goes through arch_local_irq_restore() rather
 * than writing the mask directly, so that interrupts which occurred
 * while soft-disabled can be replayed (see the PACA_IRQ_* comment at
 * the top of this file).
 */
static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}
  135. static inline unsigned long arch_local_irq_save(void)
  136. {
  137. return irq_soft_mask_set_return(IRQS_DISABLED);
  138. }
  139. static inline bool arch_irqs_disabled_flags(unsigned long flags)
  140. {
  141. return flags & IRQS_DISABLED;
  142. }
  143. static inline bool arch_irqs_disabled(void)
  144. {
  145. return arch_irqs_disabled_flags(arch_local_save_flags());
  146. }
#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of irq with PMI, a set of new
 * powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
 * macros is provided.  These mirror the generic local_irq_* helpers
 * from include/linux/irqflags.h, but additionally mask performance
 * monitor interrupts (IRQS_PMI_DISABLED).
 */
/* Mask standard + PMI interrupts; previous mask saved into 'flags'. */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |	\
				IRQS_PMI_DISABLED);			\
	} while(0)

/* Restore a mask previously returned by raw_local_irq_pmu_save(). */
#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while(0)

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Tracing variants: tell lockdep/irq-trace that irqs went off *after*
 * masking, and that they come on *before* actually unmasking, so the
 * tracer never sees an inconsistent window.
 */
#define powerpc_local_irq_pmu_save(flags)			\
	 do {							\
		raw_local_irq_pmu_save(flags);			\
		trace_hardirqs_off();				\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		if (raw_irqs_disabled_flags(flags)) {		\
			raw_local_irq_pmu_restore(flags);	\
			trace_hardirqs_off();			\
		} else {					\
			trace_hardirqs_on();			\
			raw_local_irq_pmu_restore(flags);	\
		}						\
	} while(0)
#else
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#endif /* CONFIG_TRACE_IRQFLAGS */

#endif /* CONFIG_PPC_BOOK3S */
#ifdef CONFIG_PPC_BOOK3E
/* BookE: wrteei writes only MSR[EE], no full MSR update needed. */
#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
#else
/* Book3S: write the MSR; MSR_RI (recoverable) stays set in both cases. */
#define __hard_irq_enable()	__mtmsrd(MSR_EE|MSR_RI, 1)
#define __hard_irq_disable()	__mtmsrd(MSR_RI, 1)
#endif
/*
 * Fully disable interrupts: hard-disable in the MSR first, then mark
 * both the standard and PMI soft masks disabled and record
 * PACA_IRQ_HARD_DIS in irq_happened.  If interrupts were soft-enabled
 * before this, stash r1 in paca->saved_r1 (the asm forces the store to
 * actually happen) and inform the irq tracer.
 */
#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm ("stdx %%r1, 0, %1 ;"				\
			: "=m" (local_paca->saved_r1)			\
			: "b" (&local_paca->saved_r1));			\
		trace_hardirqs_off();					\
	}								\
} while(0)
  211. static inline bool lazy_irq_pending(void)
  212. {
  213. return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
  214. }
  215. /*
  216. * This is called by asynchronous interrupts to conditionally
  217. * re-enable hard interrupts after having cleared the source
  218. * of the interrupt. They are kept disabled if there is a different
  219. * soft-masked interrupt pending that requires hard masking.
  220. */
  221. static inline void may_hard_irq_enable(void)
  222. {
  223. if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
  224. get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
  225. __hard_irq_enable();
  226. }
  227. }
  228. static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
  229. {
  230. return (regs->softe & IRQS_DISABLED);
  231. }
/* Idle-entry helpers; return false if idle must be aborted. */
extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

/*
 * Companion to prep_irq_for_idle_irqsoff().  NOTE(review): the trailing
 * semicolon is part of the macro body (upstream quirk); callers should
 * not add another.
 */
#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);
#else /* CONFIG_PPC64 */

/*
 * 32-bit: there is no lazy soft-masking; the interrupt-enable state
 * lives directly in MSR[EE].
 */
#define SET_MSR_EE(x)	mtmsr(x)

/* The "flags" on 32-bit are simply the current MSR value. */
static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}
/* Restore a previously saved MSR-based flags value. */
static inline void arch_local_irq_restore(unsigned long flags)
{
#if defined(CONFIG_BOOKE)
	/* BookE: wrtee updates only MSR[EE] from the given register. */
	asm volatile("wrtee %0" : : "r" (flags) : "memory");
#else
	mtmsr(flags);
#endif
}
/* Save the current MSR, then disable external interrupts. */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

#ifdef CONFIG_BOOKE
	/* wrteei clears MSR[EE] without a full MSR rewrite. */
	asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	/* 8xx: SPRN_EID write is the disable path on this core. */
	wrtspr(SPRN_EID);
#else
	SET_MSR_EE(flags & ~MSR_EE);
#endif

	return flags;
}
/* Disable external interrupts (MSR[EE] cleared). */
static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EID);
#else
	/* Generic path: reuse save(), discarding the returned flags. */
	arch_local_irq_save();
#endif
}
/* Enable external interrupts (MSR[EE] set). */
static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 1" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	/* 8xx: SPRN_EIE write is the enable path on this core. */
	wrtspr(SPRN_EIE);
#else
	unsigned long msr = mfmsr();

	SET_MSR_EE(msr | MSR_EE);
#endif
}
  284. static inline bool arch_irqs_disabled_flags(unsigned long flags)
  285. {
  286. return (flags & MSR_EE) == 0;
  287. }
  288. static inline bool arch_irqs_disabled(void)
  289. {
  290. return arch_irqs_disabled_flags(arch_local_save_flags());
  291. }
/* 32-bit has no soft masking: hard disable is the only disable. */
#define hard_irq_disable()		arch_local_irq_disable()
  293. static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
  294. {
  295. return !(regs->msr & MSR_EE);
  296. }
/* No lazy/hard interrupt bookkeeping on 32-bit: nothing to do. */
static inline void may_hard_irq_enable(void) { }
  298. #endif /* CONFIG_PPC64 */
/* Default flags for newly created irq descriptors on powerpc. */
#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now ? --BenH.
 */
struct irq_chip;
  305. #endif /* __ASSEMBLY__ */
  306. #endif /* __KERNEL__ */
  307. #endif /* _ASM_POWERPC_HW_IRQ_H */