mutex.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This file contains the main data structure and API definitions.
 */
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H

#include <asm/current.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>

struct ww_acquire_ctx;
/*
 * Simple, straightforward mutexes with strict semantics:
 *
 * - only one task can hold the mutex at a time
 * - only the owner can unlock the mutex
 * - multiple unlocks are not permitted
 * - recursive locking is not permitted
 * - a mutex object must be initialized via the API
 * - a mutex object must not be initialized via memset or copying
 * - task may not exit with mutex held
 * - memory areas where held locks reside must not be freed
 * - held mutexes must not be reinitialized
 * - mutexes may not be used in hardware or software interrupt
 *   contexts such as tasklets and timers
 *
 * These semantics are fully enforced when DEBUG_MUTEXES is
 * enabled. Furthermore, besides enforcing the above rules, the mutex
 * debugging code also implements a number of additional features
 * that make lock debugging easier and faster:
 *
 * - uses symbolic names of mutexes, whenever they are printed in debug output
 * - point-of-acquire tracking, symbolic lookup of function names
 * - list of all locks held in the system, printout of them
 * - owner tracking
 * - detects self-recursing locks and prints out all relevant info
 * - detects multi-task circular deadlocks and prints out all affected
 *   locks and tasks (and only those tasks)
 */
struct mutex {
        atomic_long_t           owner;
        spinlock_t              wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
        struct list_head        wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
        void                    *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};
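
/*
 * Example (editorial sketch, not part of the original header; the names
 * example_lock, example_count and example_increment are hypothetical):
 *
 *        static DEFINE_MUTEX(example_lock);
 *        static int example_count;
 *
 *        static void example_increment(void)
 *        {
 *                mutex_lock(&example_lock);
 *                example_count++;
 *                mutex_unlock(&example_lock);
 *        }
 *
 * The same task that called mutex_lock() calls mutex_unlock(), exactly
 * once and from process context, which is what the strict semantics
 * above require of well-behaved users.  DEFINE_MUTEX() is defined
 * further down in this header.
 */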

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
        return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
}

/*
 * This is the control structure for tasks blocked on mutex,
 * which resides on the blocked task's kernel stack:
 */
struct mutex_waiter {
        struct list_head        list;
        struct task_struct      *task;
        struct ww_acquire_ctx   *ww_ctx;
#ifdef CONFIG_DEBUG_MUTEXES
        void                    *magic;
#endif
};

#ifdef CONFIG_DEBUG_MUTEXES

#define __DEBUG_MUTEX_INITIALIZER(lockname) \
        , .magic = &lockname

extern void mutex_destroy(struct mutex *lock);

#else

# define __DEBUG_MUTEX_INITIALIZER(lockname)

static inline void mutex_destroy(struct mutex *lock) {}

#endif

/**
 * mutex_init - initialize the mutex
 * @mutex: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
#define mutex_init(mutex)                                       \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __mutex_init((mutex), #mutex, &__key);                  \
} while (0)
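
/*
 * Example (editorial sketch; struct example_dev and example_dev_create
 * are hypothetical): mutex_init() is meant for mutexes embedded in
 * objects that are set up at run time, e.g. on a driver's allocation
 * path:
 *
 *        struct example_dev {
 *                struct mutex io_lock;
 *        };
 *
 *        static struct example_dev *example_dev_create(void)
 *        {
 *                struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *                if (dev)
 *                        mutex_init(&dev->io_lock);
 *                return dev;
 *        }
 *
 * Statically allocated mutexes are better defined with DEFINE_MUTEX()
 * below, which expands to __MUTEX_INITIALIZER() and needs no run-time
 * initialization.
 */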

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)                  \
                , .dep_map = { .name = #lockname }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif

#define __MUTEX_INITIALIZER(lockname)                           \
                { .owner = ATOMIC_LONG_INIT(0)                  \
                , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
                , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
                __DEBUG_MUTEX_INITIALIZER(lockname)             \
                __DEP_MAP_MUTEX_INITIALIZER(lockname) }

#define DEFINE_MUTEX(mutexname) \
        struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

extern void __mutex_init(struct mutex *lock, const char *name,
                         struct lock_class_key *key);

/**
 * mutex_is_locked - is the mutex locked
 * @lock: the mutex to be queried
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool mutex_is_locked(struct mutex *lock)
{
        return __mutex_owner(lock) != NULL;
}
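
/*
 * Example (editorial sketch; example_update is hypothetical and reuses
 * struct example_dev from the mutex_init() example above):
 * mutex_is_locked() only reports that *some* task holds the mutex, not
 * that the current task does, so it is mostly useful for sanity checks
 * such as:
 *
 *        static void example_update(struct example_dev *dev)
 *        {
 *                WARN_ON(!mutex_is_locked(&dev->io_lock));
 *        }
 *
 * Where lockdep is available, lockdep_assert_held() is usually the
 * stronger assertion, since it also checks ownership.
 */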

/*
 * See kernel/locking/mutex.c for detailed documentation of these APIs.
 * Also see Documentation/locking/mutex-design.txt.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);

extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
                                        unsigned int subclass);
extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
                                        unsigned int subclass);
extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);

#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)

#define mutex_lock_nest_lock(lock, nest_lock)                           \
do {                                                                    \
        typecheck(struct lockdep_map *, &(nest_lock)->dep_map);         \
        _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);             \
} while (0)

#else
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
extern int __must_check mutex_lock_killable(struct mutex *lock);
extern void mutex_lock_io(struct mutex *lock);

# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
#endif
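
/*
 * Example (editorial sketch; example_migrate is hypothetical and reuses
 * struct example_dev from the mutex_init() example above): when two
 * mutexes of the same lock class must be held at once, lockdep would
 * normally flag the second acquisition as a potential deadlock.
 * Annotating it with a subclass declares the nesting as intentional:
 *
 *        static void example_migrate(struct example_dev *src,
 *                                    struct example_dev *dst)
 *        {
 *                mutex_lock(&src->io_lock);
 *                mutex_lock_nested(&dst->io_lock, SINGLE_DEPTH_NESTING);
 *
 *                mutex_unlock(&dst->io_lock);
 *                mutex_unlock(&src->io_lock);
 *        }
 *
 * The annotation only informs lockdep; callers still need a consistent
 * locking order (for instance by address) to avoid a real ABBA
 * deadlock.  Without CONFIG_DEBUG_LOCK_ALLOC the _nested variants fall
 * back to plain mutex_lock(), as defined above.
 */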

/*
 * NOTE: mutex_trylock() follows the spin_trylock() convention,
 * not the down_trylock() convention!
 *
 * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
 */
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);

extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
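
/*
 * Example (editorial sketch; example_try_flush is hypothetical and
 * reuses struct example_dev from the mutex_init() example above):
 * because mutex_trylock() returns 1 on success, like spin_trylock(),
 * it reads naturally in an if():
 *
 *        static bool example_try_flush(struct example_dev *dev)
 *        {
 *                if (!mutex_trylock(&dev->io_lock))
 *                        return false;
 *
 *                mutex_unlock(&dev->io_lock);
 *                return true;
 *        }
 *
 * The early return is taken on contention; with the down_trylock()
 * convention (0 on success) the test would be inverted.
 */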

/*
 * These values are chosen such that FAIL and SUCCESS match the
 * values of the regular mutex_trylock().
 */
enum mutex_trylock_recursive_enum {
        MUTEX_TRYLOCK_FAILED    = 0,
        MUTEX_TRYLOCK_SUCCESS   = 1,
        MUTEX_TRYLOCK_RECURSIVE,
};

/**
 * mutex_trylock_recursive - trylock variant that allows recursive locking
 * @lock: mutex to be locked
 *
 * This function should not be used, _ever_. It is purely for hysterical GEM
 * raisins, and once those are gone this will be removed.
 *
 * Returns:
 *  - MUTEX_TRYLOCK_FAILED    - trylock failed,
 *  - MUTEX_TRYLOCK_SUCCESS   - lock acquired,
 *  - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
 */
static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
        if (unlikely(__mutex_owner(lock) == current))
                return MUTEX_TRYLOCK_RECURSIVE;

        return mutex_trylock(lock);
}

#endif /* __LINUX_MUTEX_H */