/* SPDX-License-Identifier: GPL-2.0 */
/* 64-bit atomic xchg() and cmpxchg() definitions.
 *
 * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
 */

#ifndef __ARCH_SPARC64_CMPXCHG__
#define __ARCH_SPARC64_CMPXCHG__
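
/*
 * SPARC V9 "cas [rs1], rs2, rd" compares the 32-bit word at address
 * rs1 with rs2.  If they are equal, the memory word and rd are
 * swapped; otherwise rd is simply loaded with the current memory
 * word.  Either way rd ends up holding the value that was in memory,
 * which is exactly cmpxchg's return convention.  "casx" is the
 * 64-bit variant.
 */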

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("cas [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}
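
/*
 * xchg32()/xchg64() emulate an unconditional swap with a cas/casx
 * retry loop: the current value of *m is loaded, then cas attempts
 * to replace it with val.  If another CPU modified *m between the
 * load and the cas, the comparison fails and the branch retries.
 * The annulled delay slot ("bne,a") executes its mov only on the
 * taken (retry) path, restoring val from tmp1 before looping.
 */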

static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	lduw		[%4], %2\n"
"	cas		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");

	return val;
}

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	ldx		[%4], %2\n"
"	casx		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");

	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

/* This function doesn't exist, so calling __xchg() with an
 * unsupported size yields a linker error.
 */
void __xchg_called_with_bad_pointer(void);

/*
 * Use 4 byte cas instruction to achieve 2 byte xchg. Main logic
 * here is to get the bit shift of the byte we are interested in.
 * The XOR is handy for reversing the bits for big-endian byte order.
 */
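
/*
 * For example: sparc64 is big-endian, so a halfword at an address
 * with (m & 2) == 0 occupies bits 31:16 of its containing word and
 * gets bit_shift = (0 ^ 2) << 3 = 16, while one at (m & 2) == 2
 * occupies bits 15:0 and gets bit_shift = (2 ^ 2) << 3 = 0.
 */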

static inline unsigned long
xchg16(__volatile__ unsigned short *m, unsigned short val)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
	unsigned int mask = 0xffff << bit_shift;
	unsigned int *ptr = (unsigned int *) (maddr & ~2);
	unsigned int old32, new32, load32;

	/* Read the old value */
	load32 = *ptr;

	do {
		old32 = load32;
		new32 = (load32 & (~mask)) | val << bit_shift;
		load32 = __cmpxchg_u32(ptr, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> bit_shift;
}
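
/*
 * Note that the cas inside xchg16() compares the whole 32-bit word,
 * so a concurrent update to the *other* halfword in the same word
 * also fails the cas and causes a retry; the loop exits only once
 * the full word has been swapped atomically.
 */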

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				   int size)
{
	switch (size) {
	case 2:
		return xchg16(ptr, x);
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
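
/*
 * Example (illustrative, not part of the original header): for a
 * 4-byte object the size dispatch in __xchg() selects xchg32(), so
 *
 *	unsigned int flag = 0;
 *	unsigned int old = xchg(&flag, 1);
 *
 * leaves flag == 1 and returns the previous value into old.
 */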

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#include <asm-generic/cmpxchg-local.h>

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("casx [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/*
 * Use 4 byte cas instruction to achieve 1 byte cmpxchg. Main logic
 * here is to get the bit shift of the byte we are interested in.
 * The XOR is handy for reversing the bits for big-endian byte order.
 */
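
/*
 * On this big-endian CPU, a byte at an address with (m & 3) == 0 is
 * the most significant byte of its word, so bit_shift =
 * (0 ^ 3) << 3 = 24; at (m & 3) == 3 it is the least significant
 * byte and bit_shift = 0.
 */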

static inline unsigned long
__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
	unsigned int mask = 0xff << bit_shift;
	unsigned int *ptr = (unsigned int *) (maddr & ~3);
	unsigned int old32, new32, load;
	unsigned int load32 = *ptr;

	do {
		new32 = (load32 & ~mask) | (new << bit_shift);
		old32 = (load32 & ~mask) | (old << bit_shift);
		load32 = __cmpxchg_u32(ptr, old32, new32);
		if (load32 == old32)
			return old;
		load = (load32 & mask) >> bit_shift;
	} while (load == old);

	return load;
}
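
/*
 * The loop above retries only while the target byte still equals
 * "old": a cas failure caused by the neighbouring bytes changing is
 * retried with the freshly returned word, while a change of the
 * byte itself ends the loop and reports failure by returning a
 * value different from "old".
 */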

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8(ptr, old, new);
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
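
/*
 * Example (illustrative, not part of the original header): the
 * usual read/compute/retry pattern built on cmpxchg().
 *
 *	static void example_add(int *p, int delta)
 *	{
 *		int cur = *p, old;
 *
 *		do {
 *			old = cur;
 *			cur = cmpxchg(p, old, old + delta);
 *		} while (cur != old);
 *	}
 */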

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
	case 8:	return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
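
/*
 * On sparc64 the 4- and 8-byte cases of __cmpxchg_local() use the
 * fully SMP-atomic cas/casx path, so cmpxchg64() can simply reuse
 * cmpxchg64_local().
 */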

#define cmpxchg64(ptr, o, n)	cmpxchg64_local((ptr), (o), (n))

#endif /* __ARCH_SPARC64_CMPXCHG__ */