hw_irq.h
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64
/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS       0x01
#define PACA_IRQ_DBELL          0x02
#define PACA_IRQ_EE             0x04
#define PACA_IRQ_DEC            0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE        0x10 /* BookE only */
#define PACA_IRQ_HMI            0x20
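
/*
 * Illustrative sketch, not part of the original header: the soft-IRQ replay
 * code (__check_irq_replay() in arch/powerpc/kernel/irq.c) consumes these
 * flags roughly along these lines; the exact order and vector numbers shown
 * here are an assumption for Book3S:
 *
 *      if (happened & PACA_IRQ_DEC)
 *              return 0x900;   // replay the decrementer interrupt
 *      if (happened & PACA_IRQ_EE)
 *              return 0x500;   // replay the external interrupt
 */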

#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__

extern void replay_system_reset(void);
extern void __replay_interrupt(unsigned int vector);

extern void timer_interrupt(struct pt_regs *);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>
static inline unsigned long arch_local_save_flags(void)
{
        unsigned long flags;

        asm volatile(
                "lbz %0,%1(13)"
                : "=r" (flags)
                : "i" (offsetof(struct paca_struct, soft_enabled)));

        return flags;
}

static inline unsigned long arch_local_irq_disable(void)
{
        unsigned long flags, zero;

        asm volatile(
                "li %1,0; lbz %0,%2(13); stb %1,%2(13)"
                : "=r" (flags), "=&r" (zero)
                : "i" (offsetof(struct paca_struct, soft_enabled))
                : "memory");

        return flags;
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
        arch_local_irq_restore(1);
}

static inline unsigned long arch_local_irq_save(void)
{
        return arch_local_irq_disable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
        return flags == 0;
}

static inline bool arch_irqs_disabled(void)
{
        return arch_irqs_disabled_flags(arch_local_save_flags());
}
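
/*
 * Hedged usage sketch, added for illustration (not from the original file):
 * these helpers are normally reached through the generic local_irq_save()/
 * local_irq_restore() wrappers, along the lines of:
 *
 *      unsigned long flags;
 *
 *      flags = arch_local_irq_save();  // soft-disable, remember old state
 *      ... critical section ...
 *      arch_local_irq_restore(flags);  // re-enable and replay if pending
 */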

#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()     asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable()    asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable()     __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable()    __mtmsrd(local_paca->kernel_msr, 1)
#endif

#define hard_irq_disable()      do {                            \
        u8 _was_enabled;                                        \
        __hard_irq_disable();                                   \
        _was_enabled = local_paca->soft_enabled;                \
        local_paca->soft_enabled = 0;                           \
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;          \
        if (_was_enabled)                                       \
                trace_hardirqs_off();                           \
} while(0)

static inline bool lazy_irq_pending(void)
{
        return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts when soft-disabled after having
 * cleared the source of the interrupt.
 */
static inline void may_hard_irq_enable(void)
{
        get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
        if (!(get_paca()->irq_happened & PACA_IRQ_EE))
                __hard_irq_enable();
}
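
/*
 * Illustrative caller sketch (an assumption, not part of this header): an
 * asynchronous handler such as timer_interrupt() typically quiesces its
 * source before calling this, roughly:
 *
 *      set_dec(decrementer_max);       // silence the decrementer first
 *      may_hard_irq_enable();          // then allow hard interrupts again
 */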

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
        return !regs->softe;
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

#else /* CONFIG_PPC64 */

#define SET_MSR_EE(x)   mtmsr(x)

static inline unsigned long arch_local_save_flags(void)
{
        return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
#if defined(CONFIG_BOOKE)
        asm volatile("wrtee %0" : : "r" (flags) : "memory");
#else
        mtmsr(flags);
#endif
}

static inline unsigned long arch_local_irq_save(void)
{
        unsigned long flags = arch_local_save_flags();

#ifdef CONFIG_BOOKE
        asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
        wrtspr(SPRN_EID);
#else
        SET_MSR_EE(flags & ~MSR_EE);
#endif
        return flags;
}

static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
        asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
        wrtspr(SPRN_EID);
#else
        arch_local_irq_save();
#endif
}

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
        asm volatile("wrteei 1" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
        wrtspr(SPRN_EIE);
#else
        unsigned long msr = mfmsr();

        SET_MSR_EE(msr | MSR_EE);
#endif
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
        return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
        return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()      arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
        return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS     IRQ_NOREQUEST

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now? --BenH.
 */

struct irq_chip;

#endif  /* __ASSEMBLY__ */
#endif  /* __KERNEL__ */
#endif  /* _ASM_POWERPC_HW_IRQ_H */