irqflags.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))

/*
 * Interrupt control:
 */

static inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        /*
         * "=rm" is safe here, because "pop" adjusts the stack before
         * it evaluates its effective address -- this is part of the
         * documented behavior of the "pop" instruction.
         */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=rm" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}
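
/*
 * Illustrative sketch (not part of the original header): the value
 * returned above is the full EFLAGS image, so a caller can test
 * individual bits such as X86_EFLAGS_IF:
 *
 *      unsigned long flags = native_save_fl();
 *
 *      if (flags & X86_EFLAGS_IF)
 *              ;       // interrupts were enabled when sampled
 */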

static inline void native_restore_fl(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     :"g" (flags)
                     :"memory", "cc");
}
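
/*
 * Illustrative sketch (not part of the original header): a restore is
 * only meaningful with a flags image previously captured by
 * native_save_fl():
 *
 *      unsigned long flags = native_save_fl();
 *      ...
 *      native_restore_fl(flags);       // puts EFLAGS (incl. IF) back
 */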

static inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

static inline __cpuidle void native_safe_halt(void)
{
        asm volatile("sti; hlt": : :"memory");
}
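
/*
 * Note (added, not from the original source): "sti; hlt" relies on the
 * STI interrupt shadow -- STI enables interrupts only after the *next*
 * instruction has executed, so no interrupt can be delivered between
 * the two and leave the CPU halted after its wakeup has already fired.
 * A split sequence would be racy:
 *
 *      native_irq_enable();    // a pending wakeup may be handled here
 *      native_halt();          // ... then we halt with nothing left
 */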

static inline __cpuidle void native_halt(void)
{
        asm volatile("hlt": : :"memory");
}

#endif

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static inline notrace unsigned long arch_local_save_flags(void)
{
        return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
        native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; STI takes effect only after the following
 * instruction, so the HLT it is paired with cannot miss a wakeup:
 */
static inline __cpuidle void arch_safe_halt(void)
{
        native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline __cpuidle void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long flags = arch_local_save_flags();

        arch_local_irq_disable();

        return flags;
}
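
/*
 * Illustrative sketch (not part of the original header): the canonical
 * save/disable ... restore pattern these helpers implement.  Generic
 * code reaches them through the local_irq_save()/local_irq_restore()
 * wrappers in <linux/irqflags.h>:
 *
 *      unsigned long flags;
 *
 *      flags = arch_local_irq_save();  // disable, remember old state
 *      // ... critical section: no interrupts on this CPU ...
 *      arch_local_irq_restore(flags);  // re-enable only if on before
 */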
#else

#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli

#ifdef CONFIG_X86_64
#define SWAPGS  swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK     swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */

#define INTERRUPT_RETURN        jmp native_iret
#define USERGS_SYSRET64         \
        swapgs;                 \
        sysretq;
#define USERGS_SYSRET32         \
        swapgs;                 \
        sysretl

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(x)           pushfq; popq %rax
#endif
#else
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSEXIT       sti; sysexit
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
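
/*
 * Illustrative sketch (hypothetical entry-code fragment, not from this
 * file): how the assembly-side macros above are meant to be used.  The
 * argument to DISABLE_INTERRUPTS() is the paravirt clobber annotation
 * (e.g. CLBR_NONE); the native definition ignores it:
 *
 *      DISABLE_INTERRUPTS(CLBR_NONE)   // cli, or the paravirt variant
 *      // ... restore registers from pt_regs ...
 *      INTERRUPT_RETURN                // iret / jmp native_iret
 */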

#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static inline int arch_irqs_disabled(void)
{
        unsigned long flags = arch_local_save_flags();

        return arch_irqs_disabled_flags(flags);
}
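
/*
 * Illustrative sketch (not part of the original header): asserting
 * interrupt state from code that must run with interrupts off.  Generic
 * code normally uses the irqs_disabled() wrapper from
 * <linux/irqflags.h> rather than calling the arch_* helpers directly:
 *
 *      WARN_ON_ONCE(!arch_irqs_disabled());
 */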
#endif /* !__ASSEMBLY__ */

#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON         call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF        call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
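
/*
 * Illustrative sketch (hypothetical, not from this file): entry code
 * brackets hardware interrupt-state changes with these macros so that
 * lockdep's view of the IRQ state stays in sync, e.g.:
 *
 *      cli
 *      TRACE_IRQS_OFF          // record "hardirqs off" for lockdep
 */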

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  ifdef CONFIG_X86_64
#    define LOCKDEP_SYS_EXIT    call lockdep_sys_exit_thunk
#    define LOCKDEP_SYS_EXIT_IRQ        \
        TRACE_IRQS_ON;                  \
        sti;                            \
        call lockdep_sys_exit_thunk;    \
        cli;                            \
        TRACE_IRQS_OFF;
#  else
#    define LOCKDEP_SYS_EXIT            \
        pushl %eax;                     \
        pushl %ecx;                     \
        pushl %edx;                     \
        call lockdep_sys_exit;          \
        popl %edx;                      \
        popl %ecx;                      \
        popl %eax;
#    define LOCKDEP_SYS_EXIT_IRQ
#  endif
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */
#endif