barrier.h

#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
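
/*
 * The "lock; addl $0,0(%%esp)" fallback is a locked add of zero to the
 * top of the stack: locked read-modify-write instructions are fully
 * ordered on x86, so it serves as a full barrier on CPUs that predate
 * the SSE/SSE2 fence instructions.
 */
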
#else
#define mb() asm volatile("mfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
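
/*
 * A minimal sketch (made-up driver names, not from this file) of why the
 * barriers are needed even on UP when talking to a device: keep a DMA
 * descriptor visible in memory before the doorbell write that tells the
 * hardware to fetch it.
 *
 *	desc->addr = dma_handle;
 *	desc->len = len;
 *	wmb();
 *	writel(1, doorbell_reg);
 */
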
#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else
#define dma_rmb() barrier()
#endif
#define dma_wmb() barrier()
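
/*
 * On x86, stores to write-back memory are not reordered with other stores
 * and loads are not reordered with other loads, so ordering accesses to
 * coherent DMA memory only needs a compiler barrier here (the PentiumPro
 * ordering erratum is what CONFIG_X86_PPRO_FENCE covers above).
 */
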
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
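
/*
 * xchg with a memory operand always asserts the implicit lock prefix, so
 * smp_store_mb() gets the store and a full memory barrier from a single
 * instruction.
 */
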
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */

#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
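
/*
 * Dependent loads are never reordered on x86 (that only happens on
 * Alpha), so both read_barrier_depends() variants expand to nothing.
 */
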
#if defined(CONFIG_X86_PPRO_FENCE)
/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */
#define smp_store_release(p, v)				\
do {							\
	compiletime_assert_atomic_type(*p);		\
	smp_mb();					\
	ACCESS_ONCE(*p) = (v);				\
} while (0)

#define smp_load_acquire(p)				\
({							\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);		\
	smp_mb();					\
	___p1;						\
})
#else /* regular x86 TSO memory ordering */

#define smp_store_release(p, v)				\
do {							\
	compiletime_assert_atomic_type(*p);		\
	barrier();					\
	ACCESS_ONCE(*p) = (v);				\
} while (0)

#define smp_load_acquire(p)				\
({							\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);		\
	barrier();					\
	___p1;						\
})

#endif
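
/*
 * A minimal usage sketch (variable names made up): publish data through a
 * flag so the reader cannot observe the flag before the data.
 *
 *	writer:					reader:
 *	data = compute();			if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);			consume(data);
 *
 * On regular TSO x86 both sides cost only a compiler barrier; the
 * CONFIG_X86_PPRO_FENCE variant falls back to a full smp_mb() on each side.
 */
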
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()

/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 */
static __always_inline void rdtsc_barrier(void)
{
	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
		      "lfence", X86_FEATURE_LFENCE_RDTSC);
}
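
/*
 * A sketch of intended use (assuming a raw TSC read helper such as
 * native_read_tsc()): fence first so the timestamp cannot be speculated
 * ahead of the code being measured.
 *
 *	rdtsc_barrier();
 *	t = native_read_tsc();
 */
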
#endif /* _ASM_X86_BARRIER_H */