/*
 * This file provides wrappers with KASAN instrumentation for atomic operations.
 * To use this functionality an arch's atomic.h file needs to define all
 * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
 * this file at the end. This file provides atomic_read() that forwards to
 * arch_atomic_read() for actual atomic operation.
 * Note: if an arch atomic operation is implemented by means of other atomic
 * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
 * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
 * double instrumentation.
 */
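/*
 * Illustrative sketch (an assumption added for clarity, not taken from this
 * header): an arch defines its arch_-prefixed primitives in its own atomic.h,
 * for example
 *
 *	static __always_inline int arch_atomic_read(const atomic_t *v)
 *	{
 *		return READ_ONCE(v->counter);
 *	}
 *
 * and includes this file at the very end of that atomic.h. The instrumented
 * atomic_read() defined below then performs the KASAN check and forwards to
 * arch_atomic_read().
 */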
#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
#define _LINUX_ATOMIC_INSTRUMENTED_H

#include <linux/build_bug.h>
#include <linux/kasan-checks.h>

static __always_inline int atomic_read(const atomic_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

static __always_inline s64 atomic64_read(const atomic64_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic64_read(v);
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_set(v, i);
}

static __always_inline void atomic64_set(atomic64_t *v, s64 i)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_set(v, i);
}

static __always_inline int atomic_xchg(atomic_t *v, int i)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_xchg(v, i);
}

static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_xchg(v, i);
}

static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_cmpxchg(v, old, new);
}

static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_cmpxchg(v, old, new);
}

#ifdef arch_atomic_try_cmpxchg
#define atomic_try_cmpxchg atomic_try_cmpxchg
static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	kasan_check_write(v, sizeof(*v));
	kasan_check_read(old, sizeof(*old));
	return arch_atomic_try_cmpxchg(v, old, new);
}
#endif

#ifdef arch_atomic64_try_cmpxchg
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	kasan_check_read(old, sizeof(*old));
	return arch_atomic64_try_cmpxchg(v, old, new);
}
#endif
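/*
 * Usage sketch (illustrative only, not part of the original header):
 * try_cmpxchg() takes a pointer to the expected old value and updates it on
 * failure, which is why the wrappers above also instrument the access to
 * *old. A typical retry loop looks roughly like:
 *
 *	int old = atomic_read(v);
 *
 *	do {
 *		new = compute_new_value(old);	(hypothetical helper)
 *	} while (!atomic_try_cmpxchg(v, &old, new));
 */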
#ifdef arch_atomic_fetch_add_unless
#define atomic_fetch_add_unless atomic_fetch_add_unless
static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_add_unless(v, a, u);
}
#endif

#ifdef arch_atomic64_fetch_add_unless
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_add_unless(v, a, u);
}
#endif

static __always_inline void atomic_inc(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_inc(v);
}

static __always_inline void atomic64_inc(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_inc(v);
}

static __always_inline void atomic_dec(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_dec(v);
}

static __always_inline void atomic64_dec(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_dec(v);
}

static __always_inline void atomic_add(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}

static __always_inline void atomic64_add(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_add(i, v);
}

static __always_inline void atomic_sub(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_sub(i, v);
}

static __always_inline void atomic64_sub(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_sub(i, v);
}

static __always_inline void atomic_and(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_and(i, v);
}

static __always_inline void atomic64_and(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_and(i, v);
}

static __always_inline void atomic_or(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_or(i, v);
}

static __always_inline void atomic64_or(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_or(i, v);
}

static __always_inline void atomic_xor(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_xor(i, v);
}

static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_xor(i, v);
}

static __always_inline int atomic_inc_return(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_inc_return(v);
}

static __always_inline s64 atomic64_inc_return(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_return(v);
}

static __always_inline int atomic_dec_return(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_dec_return(v);
}

static __always_inline s64 atomic64_dec_return(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_return(v);
}

#ifdef arch_atomic64_inc_not_zero
#define atomic64_inc_not_zero atomic64_inc_not_zero
static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_not_zero(v);
}
#endif

static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_if_positive(v);
}

#ifdef arch_atomic_dec_and_test
#define atomic_dec_and_test atomic_dec_and_test
static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_dec_and_test(v);
}
#endif

#ifdef arch_atomic64_dec_and_test
#define atomic64_dec_and_test atomic64_dec_and_test
static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_and_test(v);
}
#endif

#ifdef arch_atomic_inc_and_test
#define atomic_inc_and_test atomic_inc_and_test
static __always_inline bool atomic_inc_and_test(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_inc_and_test(v);
}
#endif

#ifdef arch_atomic64_inc_and_test
#define atomic64_inc_and_test atomic64_inc_and_test
static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_and_test(v);
}
#endif

static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_add_return(i, v);
}

static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_add_return(i, v);
}

static __always_inline int atomic_sub_return(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_sub_return(i, v);
}

static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_sub_return(i, v);
}

static __always_inline int atomic_fetch_add(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_add(i, v);
}

static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_add(i, v);
}

static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_sub(i, v);
}

static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_sub(i, v);
}

static __always_inline int atomic_fetch_and(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_and(i, v);
}

static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_and(i, v);
}

static __always_inline int atomic_fetch_or(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_or(i, v);
}

static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_or(i, v);
}

static __always_inline int atomic_fetch_xor(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_xor(i, v);
}

static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_xor(i, v);
}

#ifdef arch_atomic_sub_and_test
#define atomic_sub_and_test atomic_sub_and_test
static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_sub_and_test(i, v);
}
#endif

#ifdef arch_atomic64_sub_and_test
#define atomic64_sub_and_test atomic64_sub_and_test
static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_sub_and_test(i, v);
}
#endif

#ifdef arch_atomic_add_negative
#define atomic_add_negative atomic_add_negative
static __always_inline bool atomic_add_negative(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_add_negative(i, v);
}
#endif

#ifdef arch_atomic64_add_negative
#define atomic64_add_negative atomic64_add_negative
static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_add_negative(i, v);
}
#endif
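/*
 * cmpxchg() and friends: each *_size() helper below checks the access with
 * KASAN and then dispatches on the operand size to the corresponding
 * arch_*() primitive; the wrapping macro casts the result back to the
 * pointee type so callers keep the usual cmpxchg() interface.
 */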
static __always_inline unsigned long
cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	kasan_check_write(ptr, size);
	switch (size) {
	case 1:
		return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
	case 2:
		return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
	case 4:
		return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
	case 8:
		BUILD_BUG_ON(sizeof(unsigned long) != 8);
		return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
	}
	BUILD_BUG();
	return 0;
}

#define cmpxchg(ptr, old, new)						\
({									\
	((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old),	\
		(unsigned long)(new), sizeof(*(ptr))));			\
})

static __always_inline unsigned long
sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
		  int size)
{
	kasan_check_write(ptr, size);
	switch (size) {
	case 1:
		return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
	case 2:
		return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
	case 4:
		return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
	case 8:
		BUILD_BUG_ON(sizeof(unsigned long) != 8);
		return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
	}
	BUILD_BUG();
	return 0;
}

#define sync_cmpxchg(ptr, old, new)					\
({									\
	((__typeof__(*(ptr)))sync_cmpxchg_size((ptr),			\
		(unsigned long)(old), (unsigned long)(new),		\
		sizeof(*(ptr))));					\
})

static __always_inline unsigned long
cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
		   int size)
{
	kasan_check_write(ptr, size);
	switch (size) {
	case 1:
		return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
	case 2:
		return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
	case 4:
		return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
	case 8:
		BUILD_BUG_ON(sizeof(unsigned long) != 8);
		return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
	}
	BUILD_BUG();
	return 0;
}

#define cmpxchg_local(ptr, old, new)					\
({									\
	((__typeof__(*(ptr)))cmpxchg_local_size((ptr),			\
		(unsigned long)(old), (unsigned long)(new),		\
		sizeof(*(ptr))));					\
})

static __always_inline u64
cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
{
	kasan_check_write(ptr, sizeof(*ptr));
	return arch_cmpxchg64(ptr, old, new);
}

#define cmpxchg64(ptr, old, new)					\
({									\
	((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old),		\
		(u64)(new)));						\
})

static __always_inline u64
cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
{
	kasan_check_write(ptr, sizeof(*ptr));
	return arch_cmpxchg64_local(ptr, old, new);
}

#define cmpxchg64_local(ptr, old, new)					\
({									\
	((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old),	\
		(u64)(new)));						\
})

/*
 * Originally we had the following code here:
 *	__typeof__(p1) ____p1 = (p1);
 *	kasan_check_write(____p1, 2 * sizeof(*____p1));
 *	arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
 * But it leads to compilation failures (see gcc issue 72873).
 * So for now it's left non-instrumented.
 * There are few callers of cmpxchg_double(), so it's not critical.
 */
#define cmpxchg_double(p1, p2, o1, o2, n1, n2)				\
({									\
	arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2));	\
})

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)			\
({									\
	arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2));	\
})
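/*
 * Illustrative call shape (an assumption added for clarity, not taken from
 * this header): cmpxchg_double() compares and swaps two adjacent, suitably
 * aligned machine words as a single atomic operation, roughly:
 *
 *	if (cmpxchg_double(&obj->word0, &obj->word1,
 *			   old0, old1, new0, new1))
 *		both words were replaced atomically
 *
 * where obj, word0 and word1 are hypothetical names used only for this
 * sketch.
 */
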
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */