@@ -64,27 +64,31 @@ do { \
 
 #define smp_load_acquire(p) \
 ({ \
-	typeof(*p) ___p1; \
+	union { typeof(*p) __val; char __c[1]; } __u; \
 	compiletime_assert_atomic_type(*p); \
 	switch (sizeof(*p)) { \
 	case 1: \
 		asm volatile ("ldarb %w0, %1" \
-			: "=r" (___p1) : "Q" (*p) : "memory"); \
+			: "=r" (*(__u8 *)__u.__c) \
+			: "Q" (*p) : "memory"); \
 		break; \
 	case 2: \
 		asm volatile ("ldarh %w0, %1" \
-			: "=r" (___p1) : "Q" (*p) : "memory"); \
+			: "=r" (*(__u16 *)__u.__c) \
+			: "Q" (*p) : "memory"); \
 		break; \
 	case 4: \
 		asm volatile ("ldar %w0, %1" \
-			: "=r" (___p1) : "Q" (*p) : "memory"); \
+			: "=r" (*(__u32 *)__u.__c) \
+			: "Q" (*p) : "memory"); \
 		break; \
 	case 8: \
 		asm volatile ("ldar %0, %1" \
-			: "=r" (___p1) : "Q" (*p) : "memory"); \
+			: "=r" (*(__u64 *)__u.__c) \
+			: "Q" (*p) : "memory"); \
 		break; \
 	} \
-	___p1; \
+	__u.__val; \
 })
 
 #define read_barrier_depends()		do { } while(0)
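
Why the union: with the old "typeof(*p) ___p1;" temporary, the asm output
operand inherits any const qualifier on *p, and a const lvalue cannot be used
as an "=r" output, so smp_load_acquire() would not build for pointers to
const. Storing the loaded value through the non-const __c character array
(cast to an integer type of the right width) sidesteps that, and the result
is then read back out as __u.__val with its original type; the kernel's
READ_ONCE() uses the same union pattern. Below is a minimal user-space sketch
of the idea, not kernel code: the macro name load_acquire32 is hypothetical,
only the 4-byte case is handled, and a plain volatile load stands in for the
ldar acquire instruction.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch of the union trick from the patch above.
 * A plain volatile load stands in for the arm64 "ldar" instruction,
 * so this provides no acquire ordering; it only demonstrates why the
 * result can be written even when typeof(*p) is const-qualified.
 */
#define load_acquire32(p) ({						\
	union { typeof(*(p)) __val; char __c[1]; } __u;			\
	/* Write through the non-const __c storage, as the asm does. */	\
	*(uint32_t *)__u.__c = *(volatile uint32_t *)(p);		\
	/* Read the value back out with its original type. */		\
	__u.__val;							\
})

int main(void)
{
	const int flag = 42;	/* const is fine: we never assign to __val */

	printf("%d\n", load_acquire32(&flag));	/* prints 42 */
	return 0;
}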