@@ -35,10 +35,60 @@
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
+
+#define smp_store_release(p, v)					\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	___p1;								\
+})
+
 #else
+
 #define smp_mb()	asm volatile("dmb ish" : : : "memory")
 #define smp_rmb()	asm volatile("dmb ishld" : : : "memory")
 #define smp_wmb()	asm volatile("dmb ishst" : : : "memory")
+
+#define smp_store_release(p, v)					\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 4:								\
+		asm volatile ("stlr %w1, %0"				\
+				: "=Q" (*p) : "r" (v) : "memory");	\
+		break;							\
+	case 8:								\
+		asm volatile ("stlr %1, %0"				\
+				: "=Q" (*p) : "r" (v) : "memory");	\
+		break;							\
+	}								\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1;						\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 4:								\
+		asm volatile ("ldar %w0, %1"				\
+				: "=r" (___p1) : "Q" (*p) : "memory");	\
+		break;							\
+	case 8:								\
+		asm volatile ("ldar %0, %1"				\
+				: "=r" (___p1) : "Q" (*p) : "memory");	\
+		break;							\
+	}								\
+} while (0)
+
 #endif
 
 #define read_barrier_depends()		do { } while(0)
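For context, the intended use of this pair is one-way message passing between CPUs: a writer publishes data and then sets a flag with smp_store_release(), and a reader checks the flag with smp_load_acquire() before touching the data. The sketch below is illustrative only and is not part of the patch; the message/msg_ready names are invented for the example.

/* Illustrative sketch (not from the patch): the message-passing
 * pattern smp_store_release()/smp_load_acquire() are meant for.
 */
struct message {
	int payload;
};

static struct message msg;
static int msg_ready;

/* Writer: the release store orders the payload store before the flag. */
void publish(int value)
{
	msg.payload = value;
	smp_store_release(&msg_ready, 1);
}

/* Reader: the acquire load orders the flag load before the payload load,
 * so a reader that sees msg_ready == 1 also sees the payload.
 */
int consume(int *value)
{
	if (smp_load_acquire(&msg_ready)) {
		*value = msg.payload;
		return 1;
	}
	return 0;
}

On arm64 this compiles to a single stlr/ldar instruction for the flag accesses (the #else branch above), while the barrier()-based branch falls back to a full smp_mb() plus a plain access.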