atomic_ops.h

/*
 * Low level functions for atomic operations
 *
 * Copyright IBM Corp. 1999, 2016
 */

#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
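
/*
 * On z196 and later the interlocked-access facility provides load-and-*
 * instructions (laa, lan, lao, lax and their 64-bit ..g forms), which
 * perform the atomic update in a single instruction and return the old
 * value in the first operand register.
 */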

#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier)		\
static inline op_type op_name(op_type val, op_type *ptr)		\
{									\
	op_type old;							\
									\
	asm volatile(							\
		op_string "	%[old],%[val],%[ptr]\n"			\
		op_barrier						\
		: [old] "=d" (old), [ptr] "+Q" (*ptr)			\
		: [val] "d" (val) : "cc", "memory");			\
	return old;							\
}

#define __ATOMIC_OPS(op_name, op_type, op_string)			\
	__ATOMIC_OP(op_name, op_type, op_string, "\n")			\
	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
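
/*
 * Each __ATOMIC_OPS() invocation below generates two helpers, e.g.
 * __ATOMIC_OPS(__atomic_add, int, "laa") expands to
 *
 *	static inline int __atomic_add(int val, int *ptr);
 *	static inline int __atomic_add_barrier(int val, int *ptr);
 *
 * The _barrier variant appends "bcr 14,0", a no-op branch that acts as
 * a serialization point (fast-BCR serialization), so the atomic update
 * also works as a full memory barrier.
 */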

__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")
__ATOMIC_OPS(__atomic_or, int, "lao")
__ATOMIC_OPS(__atomic_xor, int, "lax")

__ATOMIC_OPS(__atomic64_add, long, "laag")
__ATOMIC_OPS(__atomic64_and, long, "lang")
__ATOMIC_OPS(__atomic64_or, long, "laog")
__ATOMIC_OPS(__atomic64_xor, long, "laxg")

#undef __ATOMIC_OPS
#undef __ATOMIC_OP
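
/*
 * Usage sketch (illustrative only, "counter" is a hypothetical variable):
 * the generated helpers return the previous value, e.g.
 *
 *	int old = __atomic_add(1, &counter);
 *
 * The *_add_const() helpers below take a compile-time constant (note the
 * "i" constraint) and use the add-immediate-to-storage instructions
 * asi/agsi, which update memory directly and do not return the old value.
 */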

static inline void __atomic_add_const(int val, int *ptr)
{
	asm volatile(
		"	asi	%[ptr],%[val]\n"
		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}

static inline void __atomic64_add_const(long val, long *ptr)
{
	asm volatile(
		"	agsi	%[ptr],%[val]\n"
		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
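
/*
 * Pre-z196 fallback: build each operation from a compare-and-swap loop.
 * The old value is loaded via the "0" (*ptr) input constraint, the new
 * value is computed in a scratch register, and cs/csg stores it only if
 * *ptr still equals the old value; on mismatch (condition code 1) the
 * jl branch retries.  Since compare-and-swap itself serializes the CPU,
 * the generated _barrier variants share the same body.
 */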

#define __ATOMIC_OP(op_name, op_string)					\
static inline int op_name(int val, int *ptr)				\
{									\
	int old, new;							\
									\
	asm volatile(							\
		"0:	lr	%[new],%[old]\n"			\
		op_string "	%[new],%[val]\n"			\
		"	cs	%[old],%[new],%[ptr]\n"			\
		"	jl	0b"					\
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
		: [val] "d" (val), "0" (*ptr) : "cc", "memory");	\
	return old;							\
}

#define __ATOMIC_OPS(op_name, op_string)				\
	__ATOMIC_OP(op_name, op_string)					\
	__ATOMIC_OP(op_name##_barrier, op_string)

__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or, "or")
__ATOMIC_OPS(__atomic_xor, "xr")

#undef __ATOMIC_OPS

#define __ATOMIC64_OP(op_name, op_string)				\
static inline long op_name(long val, long *ptr)				\
{									\
	long old, new;							\
									\
	asm volatile(							\
		"0:	lgr	%[new],%[old]\n"			\
		op_string "	%[new],%[val]\n"			\
		"	csg	%[old],%[new],%[ptr]\n"			\
		"	jl	0b"					\
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
		: [val] "d" (val), "0" (*ptr) : "cc", "memory");	\
	return old;							\
}

#define __ATOMIC64_OPS(op_name, op_string)				\
	__ATOMIC64_OP(op_name, op_string)				\
	__ATOMIC64_OP(op_name##_barrier, op_string)

__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or, "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")

#undef __ATOMIC64_OPS

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
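
/*
 * Compare-and-swap (cs/csg) is available on all supported machine
 * generations, so the cmpxchg helpers live outside the facility #ifdef.
 * They return the value found in memory: on success this equals the
 * expected old value, otherwise it is the current contents of *ptr.
 */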

static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
	asm volatile(
		"	cs	%[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+Q" (*ptr)
		: [new] "d" (new) : "cc", "memory");
	return old;
}
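
/*
 * Illustrative use (hypothetical caller): the update succeeded when the
 * returned value matches the expected one, e.g.
 *
 *	if (__atomic_cmpxchg(&lock, 0, 1) == 0)
 *		locked = true;
 *
 * where lock and locked are hypothetical variables.
 */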

static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
	asm volatile(
		"	csg	%[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+Q" (*ptr)
		: [new] "d" (new) : "cc", "memory");
	return old;
}

#endif /* __ARCH_S390_ATOMIC_OPS__ */