atomic-instrumented.h

/*
 * This file provides wrappers with KASAN instrumentation for atomic operations.
 * To use this functionality an arch's atomic.h file needs to define all
 * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
 * this file at the end. This file provides atomic_read() that forwards to
 * arch_atomic_read() for actual atomic operation.
 * Note: if an arch atomic operation is implemented by means of other atomic
 * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
 * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
 * double instrumentation.
 */
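
/*
 * Illustrative sketch only (the arch name and file path below are made up):
 * an architecture defines every operation with the arch_ prefix in its own
 * atomic.h and then includes this header at the end, e.g.:
 *
 *	// arch/foo/include/asm/atomic.h (hypothetical)
 *	static __always_inline int arch_atomic_read(const atomic_t *v)
 *	{
 *		return READ_ONCE(v->counter);
 *	}
 *	// ... remaining arch_atomic*()/arch_cmpxchg*() definitions ...
 *	#include <asm-generic/atomic-instrumented.h>	// i.e. this file; path illustrative
 */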
#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
#define _LINUX_ATOMIC_INSTRUMENTED_H

#include <linux/build_bug.h>
#include <linux/kasan-checks.h>
static __always_inline int atomic_read(const atomic_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

static __always_inline s64 atomic64_read(const atomic64_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic64_read(v);
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_set(v, i);
}

static __always_inline void atomic64_set(atomic64_t *v, s64 i)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_set(v, i);
}

static __always_inline int atomic_xchg(atomic_t *v, int i)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_xchg(v, i);
}

static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_xchg(v, i);
}

static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_cmpxchg(v, old, new);
}

static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_cmpxchg(v, old, new);
}
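
/*
 * The try_cmpxchg() wrappers below also dereference 'old', so that location
 * is checked in addition to the atomic variable itself.
 */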
#ifdef arch_atomic_try_cmpxchg
#define atomic_try_cmpxchg atomic_try_cmpxchg
static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	kasan_check_write(v, sizeof(*v));
	kasan_check_read(old, sizeof(*old));
	return arch_atomic_try_cmpxchg(v, old, new);
}
#endif

#ifdef arch_atomic64_try_cmpxchg
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	kasan_check_read(old, sizeof(*old));
	return arch_atomic64_try_cmpxchg(v, old, new);
}
#endif
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	kasan_check_write(v, sizeof(*v));
	return __arch_atomic_add_unless(v, a, u);
}

static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_add_unless(v, a, u);
}

static __always_inline void atomic_inc(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_inc(v);
}

static __always_inline void atomic64_inc(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_inc(v);
}

static __always_inline void atomic_dec(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_dec(v);
}

static __always_inline void atomic64_dec(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_dec(v);
}

static __always_inline void atomic_add(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}

static __always_inline void atomic64_add(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_add(i, v);
}

static __always_inline void atomic_sub(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_sub(i, v);
}

static __always_inline void atomic64_sub(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_sub(i, v);
}

static __always_inline void atomic_and(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_and(i, v);
}

static __always_inline void atomic64_and(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_and(i, v);
}

static __always_inline void atomic_or(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_or(i, v);
}

static __always_inline void atomic64_or(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_or(i, v);
}

static __always_inline void atomic_xor(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_xor(i, v);
}

static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_xor(i, v);
}
static __always_inline int atomic_inc_return(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_inc_return(v);
}

static __always_inline s64 atomic64_inc_return(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_return(v);
}

static __always_inline int atomic_dec_return(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_dec_return(v);
}

static __always_inline s64 atomic64_dec_return(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_return(v);
}

static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_not_zero(v);
}

static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_if_positive(v);
}

static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_dec_and_test(v);
}

static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_and_test(v);
}

static __always_inline bool atomic_inc_and_test(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_inc_and_test(v);
}

static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_and_test(v);
}

static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_add_return(i, v);
}

static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_add_return(i, v);
}

static __always_inline int atomic_sub_return(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_sub_return(i, v);
}

static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_sub_return(i, v);
}
static __always_inline int atomic_fetch_add(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_add(i, v);
}

static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_add(i, v);
}

static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_sub(i, v);
}

static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_sub(i, v);
}

static __always_inline int atomic_fetch_and(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_and(i, v);
}

static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_and(i, v);
}

static __always_inline int atomic_fetch_or(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_or(i, v);
}

static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_or(i, v);
}

static __always_inline int atomic_fetch_xor(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_xor(i, v);
}

static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_xor(i, v);
}

static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_sub_and_test(i, v);
}

static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_sub_and_test(i, v);
}

static __always_inline bool atomic_add_negative(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_add_negative(i, v);
}

static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_add_negative(i, v);
}
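
/*
 * The cmpxchg() family is instrumented through *_size() helpers: each macro
 * below passes sizeof(*(ptr)) so the helper can check exactly the accessed
 * bytes and then dispatch to the arch_ operation of the matching width.
 */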
static __always_inline unsigned long
cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	kasan_check_write(ptr, size);
	switch (size) {
	case 1:
		return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
	case 2:
		return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
	case 4:
		return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
	case 8:
		BUILD_BUG_ON(sizeof(unsigned long) != 8);
		return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
	}
	BUILD_BUG();
	return 0;
}

#define cmpxchg(ptr, old, new) \
({ \
	((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old), \
		(unsigned long)(new), sizeof(*(ptr)))); \
})
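
/*
 * Usage is unchanged from the uninstrumented macro; a minimal sketch
 * (variable name is illustrative):
 *
 *	u32 flag = 0;
 *	if (cmpxchg(&flag, 0, 1) == 0)
 *		;	// flag was 0 and is now 1; &flag was KASAN-checked first
 */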
static __always_inline unsigned long
sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
		  int size)
{
	kasan_check_write(ptr, size);
	switch (size) {
	case 1:
		return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
	case 2:
		return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
	case 4:
		return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
	case 8:
		BUILD_BUG_ON(sizeof(unsigned long) != 8);
		return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
	}
	BUILD_BUG();
	return 0;
}

#define sync_cmpxchg(ptr, old, new) \
({ \
	((__typeof__(*(ptr)))sync_cmpxchg_size((ptr), \
		(unsigned long)(old), (unsigned long)(new), \
		sizeof(*(ptr)))); \
})
static __always_inline unsigned long
cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
		   int size)
{
	kasan_check_write(ptr, size);
	switch (size) {
	case 1:
		return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
	case 2:
		return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
	case 4:
		return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
	case 8:
		BUILD_BUG_ON(sizeof(unsigned long) != 8);
		return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
	}
	BUILD_BUG();
	return 0;
}

#define cmpxchg_local(ptr, old, new) \
({ \
	((__typeof__(*(ptr)))cmpxchg_local_size((ptr), \
		(unsigned long)(old), (unsigned long)(new), \
		sizeof(*(ptr)))); \
})
static __always_inline u64
cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
{
	kasan_check_write(ptr, sizeof(*ptr));
	return arch_cmpxchg64(ptr, old, new);
}

#define cmpxchg64(ptr, old, new) \
({ \
	((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old), \
		(u64)(new))); \
})

static __always_inline u64
cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
{
	kasan_check_write(ptr, sizeof(*ptr));
	return arch_cmpxchg64_local(ptr, old, new);
}

#define cmpxchg64_local(ptr, old, new) \
({ \
	((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old), \
		(u64)(new))); \
})
/*
 * Originally we had the following code here:
 *	__typeof__(p1) ____p1 = (p1);
 *	kasan_check_write(____p1, 2 * sizeof(*____p1));
 *	arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
 * But it leads to compilation failures (see gcc issue 72873).
 * So for now it's left non-instrumented.
 * There are few callers of cmpxchg_double(), so it's not critical.
 */
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
	arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2)); \
})

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
({ \
	arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2)); \
})

#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */