#ifndef _TOOLS_LINUX_COMPILER_H_
#define _TOOLS_LINUX_COMPILER_H_

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
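
/*
 * Illustrative sketch (not part of this header): because the asm above
 * clobbers "memory", the compiler must assume any memory may have changed
 * across barrier() and so cannot cache `flag` in a register. The variable
 * name `flag` is hypothetical.
 *
 *	extern int flag;
 *
 *	while (!flag)
 *		barrier();	// forces a reload of flag on each iteration
 */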
#ifndef __always_inline
# define __always_inline inline __attribute__((always_inline))
#endif

#define __user

#ifndef __attribute_const__
# define __attribute_const__
#endif

#ifndef __maybe_unused
# define __maybe_unused __attribute__((unused))
#endif

#ifndef __packed
# define __packed __attribute__((__packed__))
#endif

#ifndef __force
# define __force
#endif

#ifndef __weak
# define __weak __attribute__((weak))
#endif
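
/*
 * Illustrative use of the annotation shims above (a sketch; the struct
 * and function names are hypothetical, not part of this header):
 *
 *	struct wire_hdr {
 *		__u8  type;
 *		__u32 len;
 *	} __packed;			// no padding between members
 *
 *	void __weak arch_init(void)	// default definition; a stronger one
 *	{				// elsewhere overrides it at link time
 *	}
 */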
#ifndef likely
# define likely(x) __builtin_expect(!!(x), 1)
#endif

#ifndef unlikely
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif
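
/*
 * Illustrative use (a sketch, not part of this header): the !! normalizes
 * any truth value to 0 or 1 for __builtin_expect, and the hint lets the
 * compiler lay out the expected path as the fall-through. `buf` is a
 * hypothetical name.
 *
 *	if (unlikely(buf == NULL))
 *		return -1;	// cold error path
 */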
/*
 * ACCESS_ONCE() forces a single access by going through a
 * volatile-qualified lvalue; it works only on scalar types. For aggregate
 * types, use READ_ONCE()/WRITE_ONCE() below.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
#include <linux/types.h>	/* for the __u8/__u16/__u32/__u64 used below */

/*
 * Read the value at p into res as a single access when the size matches a
 * machine type; otherwise fall back to a memcpy bracketed by compiler
 * barriers.
 */
static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}
/*
 * Write the value in res to p as a single access when the size matches a
 * machine type; otherwise fall back to a memcpy bracketed by compiler
 * barriers.
 */
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}
/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (defined above), but only when the
 * compiler is aware of some particular ordering. One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE, these two macros also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type is not 1, 2, 4 or 8 bytes, READ_ONCE() and WRITE_ONCE() fall back
 * to a memcpy() bracketed by compiler barriers.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#define READ_ONCE(x) \
({ \
	union { typeof(x) __val; char __c[1]; } __u; \
	__read_once_size(&(x), __u.__c, sizeof(x)); \
	__u.__val; \
})

#define WRITE_ONCE(x, val) \
({ \
	union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x)); \
	__u.__val; \
})
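
/*
 * Usage sketch (not part of this header), for use case (1) above: a flag
 * shared between process-level code and an interrupt handler on the same
 * CPU. The names `ready`, `payload`, `compute()` and `consume()` are
 * hypothetical. Note that READ_ONCE()/WRITE_ONCE() only constrain the
 * compiler; cross-CPU ordering still needs explicit memory barriers or
 * atomics, per (2) above.
 *
 *	static int payload;
 *	static int ready;
 *
 *	void producer(void)
 *	{
 *		payload = compute();
 *		WRITE_ONCE(ready, 1);	// emitted as one store, not merged
 *	}
 *
 *	void consumer(void)
 *	{
 *		while (!READ_ONCE(ready))	// refetched on every pass
 *			;
 *		consume(payload);
 *	}
 */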
#endif /* _TOOLS_LINUX_COMPILER_H_ */