/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This file contains the main data structure and API definitions.
 */
  10. #ifndef __LINUX_MUTEX_H
  11. #define __LINUX_MUTEX_H
  12. #include <asm/current.h>
  13. #include <linux/list.h>
  14. #include <linux/spinlock_types.h>
  15. #include <linux/linkage.h>
  16. #include <linux/lockdep.h>
  17. #include <linux/atomic.h>
  18. #include <asm/processor.h>
  19. #include <linux/osq_lock.h>
  20. #include <linux/debug_locks.h>
/*
 * Simple, straightforward mutexes with strict semantics:
 *
 * - only one task can hold the mutex at a time
 * - only the owner can unlock the mutex
 * - multiple unlocks are not permitted
 * - recursive locking is not permitted
 * - a mutex object must be initialized via the API
 * - a mutex object must not be initialized via memset or copying
 * - task may not exit with mutex held
 * - memory areas where held locks reside must not be freed
 * - held mutexes must not be reinitialized
 * - mutexes may not be used in hardware or software interrupt
 *   contexts such as tasklets and timers
 *
 * These semantics are fully enforced when DEBUG_MUTEXES is
 * enabled. Furthermore, besides enforcing the above rules, the mutex
 * debugging code also implements a number of additional features
 * that make lock debugging easier and faster:
 *
 * - uses symbolic names of mutexes, whenever they are printed in debug output
 * - point-of-acquire tracking, symbolic lookup of function names
 * - list of all locks held in the system, printout of them
 * - owner tracking
 * - detects self-recursing locks and prints out all relevant info
 * - detects multi-task circular deadlocks and prints out all affected
 *   locks and tasks (and only those tasks)
 */
  49. struct mutex {
  50. atomic_long_t owner;
  51. spinlock_t wait_lock;
  52. #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  53. struct optimistic_spin_queue osq; /* Spinner MCS lock */
  54. #endif
  55. struct list_head wait_list;
  56. #ifdef CONFIG_DEBUG_MUTEXES
  57. void *magic;
  58. #endif
  59. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  60. struct lockdep_map dep_map;
  61. #endif
  62. };
  63. static inline struct task_struct *__mutex_owner(struct mutex *lock)
  64. {
  65. return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03);
  66. }
  67. /*
  68. * This is the control structure for tasks blocked on mutex,
  69. * which resides on the blocked task's kernel stack:
  70. */
  71. struct mutex_waiter {
  72. struct list_head list;
  73. struct task_struct *task;
  74. #ifdef CONFIG_DEBUG_MUTEXES
  75. void *magic;
  76. #endif
  77. };
  78. #ifdef CONFIG_DEBUG_MUTEXES
  79. #define __DEBUG_MUTEX_INITIALIZER(lockname) \
  80. , .magic = &lockname
  81. extern void mutex_destroy(struct mutex *lock);
  82. #else
  83. # define __DEBUG_MUTEX_INITIALIZER(lockname)
  84. static inline void mutex_destroy(struct mutex *lock) {}
  85. #endif
  86. /**
  87. * mutex_init - initialize the mutex
  88. * @mutex: the mutex to be initialized
  89. *
  90. * Initialize the mutex to unlocked state.
  91. *
  92. * It is not allowed to initialize an already locked mutex.
  93. */
  94. #define mutex_init(mutex) \
  95. do { \
  96. static struct lock_class_key __key; \
  97. \
  98. __mutex_init((mutex), #mutex, &__key); \
  99. } while (0)
  100. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  101. # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
  102. , .dep_map = { .name = #lockname }
  103. #else
  104. # define __DEP_MAP_MUTEX_INITIALIZER(lockname)
  105. #endif
  106. #define __MUTEX_INITIALIZER(lockname) \
  107. { .owner = ATOMIC_LONG_INIT(0) \
  108. , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
  109. , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
  110. __DEBUG_MUTEX_INITIALIZER(lockname) \
  111. __DEP_MAP_MUTEX_INITIALIZER(lockname) }
  112. #define DEFINE_MUTEX(mutexname) \
  113. struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
  114. extern void __mutex_init(struct mutex *lock, const char *name,
  115. struct lock_class_key *key);
  116. /**
  117. * mutex_is_locked - is the mutex locked
  118. * @lock: the mutex to be queried
  119. *
  120. * Returns 1 if the mutex is locked, 0 if unlocked.
  121. */
  122. static inline int mutex_is_locked(struct mutex *lock)
  123. {
  124. /*
  125. * XXX think about spin_is_locked
  126. */
  127. return __mutex_owner(lock) != NULL;
  128. }
  129. /*
  130. * See kernel/locking/mutex.c for detailed documentation of these APIs.
  131. * Also see Documentation/locking/mutex-design.txt.
  132. */
  133. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  134. extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
  135. extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
  136. extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
  137. unsigned int subclass);
  138. extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
  139. unsigned int subclass);
  140. #define mutex_lock(lock) mutex_lock_nested(lock, 0)
  141. #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
  142. #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
  143. #define mutex_lock_nest_lock(lock, nest_lock) \
  144. do { \
  145. typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
  146. _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
  147. } while (0)
  148. #else
  149. extern void mutex_lock(struct mutex *lock);
  150. extern int __must_check mutex_lock_interruptible(struct mutex *lock);
  151. extern int __must_check mutex_lock_killable(struct mutex *lock);
  152. # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
  153. # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
  154. # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
  155. # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
  156. #endif
  157. /*
  158. * NOTE: mutex_trylock() follows the spin_trylock() convention,
  159. * not the down_trylock() convention!
  160. *
  161. * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
  162. */
  163. extern int mutex_trylock(struct mutex *lock);
  164. extern void mutex_unlock(struct mutex *lock);
  165. extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
  166. #endif /* __LINUX_MUTEX_H */