#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */
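
/*
 * in_lock_functions() reports whether an instruction address lies within
 * the out-of-line lock functions (the __lockfunc text section); callers
 * such as profiling code use it to attribute time spent inside lock
 * internals to the lock's caller instead.
 */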
int in_lock_functions(unsigned long addr);

#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
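
/*
 * Example (an illustrative sketch, hypothetical struct): helpers that
 * rely on the caller already holding a lock can document and enforce
 * that with assert_spin_locked():
 *
 *	static void foo_update_stats(struct foo *f)
 *	{
 *		assert_spin_locked(&f->lock);
 *		f->nr_updates++;
 *	}
 */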

void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
							__acquires(lock);
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
							__acquires(lock);
void __lockfunc _read_lock(rwlock_t *lock)		__acquires(lock);
void __lockfunc _write_lock(rwlock_t *lock)		__acquires(lock);
void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(lock);
void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(lock);
void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(lock);
void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(lock);
void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
							__acquires(lock);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
							__acquires(lock);
int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _read_trylock(rwlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);
int __lockfunc _spin_trylock_bh(spinlock_t *lock);
void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(lock);
void __lockfunc _read_unlock(rwlock_t *lock)		__releases(lock);
void __lockfunc _write_unlock(rwlock_t *lock)		__releases(lock);
void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(lock);
void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(lock);
void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(lock);
void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(lock);
void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
							__releases(lock);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
							__releases(lock);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
							__releases(lock);
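
/*
 * The out-of-line entry points declared above live in kernel/spinlock.c
 * and are (roughly, a sketch) thin exported wrappers around the inline
 * helpers defined below, e.g.:
 *
 *	void __lockfunc _spin_lock(spinlock_t *lock)
 *	{
 *		__spin_lock(lock);
 *	}
 *	EXPORT_SYMBOL(_spin_lock);
 */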

static inline int __spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}
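
/*
 * Note the ordering in the trylock helpers: preemption is disabled
 * *before* the raw trylock is attempted, so a success returns with
 * preemption off (matching the unlock path's preempt_enable()); on
 * failure it is re-enabled and 0 is returned.  The "1" passed as the
 * trylock argument of spin_acquire() tells lockdep this acquire cannot
 * block, so it is not subject to the usual deadlock checks.
 */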

static inline int __read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock)) {
		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}

static inline int __write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}
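
/*
 * _raw_read_trylock() succeeds whenever no writer holds the lock
 * (readers can share and nest), while _raw_write_trylock() only
 * succeeds when the lock is completely free; lockdep records the two
 * cases via rwlock_acquire_read() and rwlock_acquire() respectively.
 */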

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline void __read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
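
/*
 * For reference, LOCK_CONTENDED(lock, try, lock_fn) expands (roughly,
 * when CONFIG_LOCK_STAT is enabled) to:
 *
 *	if (!try(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);
 *		lock_fn(lock);
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);
 *
 * so a contention event is only recorded on the slow path; without lock
 * statistics it is simply lock_fn(lock).
 */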

static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable in
	 * _raw_spin_lock_flags(), because lockdep assumes that
	 * interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
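
/*
 * Usage sketch: the flags returned here must be handed back to the
 * matching irqrestore on the same task, via the public wrappers:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&lock, flags);
 *	...critical section, hardirqs off...
 *	spin_unlock_irqrestore(&lock, flags);
 */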

static inline void __spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
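
/*
 * The _bh variants disable softirq processing on this CPU before taking
 * the lock, so data shared with bottom-half (softirq) context can be
 * protected without a softirq deadlocking against the lock holder.
 */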

static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
			     _raw_read_lock_flags, &flags);
	return flags;
}
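
/*
 * LOCK_CONTENDED_FLAGS is the flags-aware variant: with lockdep enabled
 * it degrades to plain LOCK_CONTENDED (interrupts stay disabled for the
 * whole acquire, per the comment above), otherwise it may call the
 * _raw_*_lock_flags() slow path, which is allowed to briefly re-enable
 * interrupts from *flags while spinning.
 */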

static inline void __read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}

static inline void __read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}

static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
			     _raw_write_lock_flags, &flags);
	return flags;
}

static inline void __write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

static inline void __write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

static inline void __spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}
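
/*
 * All unlock helpers follow the same order: tell lockdep the lock is
 * being released, drop the raw lock, and only then undo the matching
 * preempt/irq/bh disable, at which point a pending reschedule or
 * interrupt can be serviced.
 */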

static inline void __write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}

static inline void __read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}

static inline void __spin_unlock_irqrestore(spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
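
/*
 * The _bh unlock path uses preempt_enable_no_resched() because
 * local_bh_enable_ip() will itself check for a pending reschedule once
 * softirqs are re-enabled; the caller's return address is passed in so
 * the bh-enable event is attributed to the real call site rather than
 * to this inline helper.
 */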

static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline void __write_unlock_irqrestore(rwlock_t *lock,
					     unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline int __spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
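
/*
 * Note that the failure path above unwinds in exact reverse order of
 * the setup (preempt first, then bh), so a failed trylock leaves
 * preemption and softirq state exactly as the caller had them.
 */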

#endif /* __LINUX_SPINLOCK_API_SMP_H */