wait_bit.c
/*
 * The implementation of the wait_bit*() and related waiting APIs:
 */
#include "sched.h"

#define WAIT_TABLE_BITS 8
#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)

static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;

wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
	unsigned long val = (unsigned long)word << shift | bit;

	return bit_wait_table + hash_long(val, WAIT_TABLE_BITS);
}
EXPORT_SYMBOL(bit_waitqueue);
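
/*
 * Note (illustrative, not part of the original file): because every
 * (word, bit) pair hashes into the same 256-entry table, unrelated
 * waiters can end up sharing a waitqueue head. That is harmless: each
 * wake function re-checks its own key before waking a task, e.g. with
 * two hypothetical flag words:
 *
 *	wait_queue_head_t *a = bit_waitqueue(&flags_a, 0);
 *	wait_queue_head_t *b = bit_waitqueue(&flags_b, 3);
 *	// a may equal b; spurious wakeups are filtered out by
 *	// wake_bit_function() below.
 */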

int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;

	return autoremove_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the action functions passed to __wait_on_bit() and
 * __wait_on_bit_lock() are permitted to return error codes: a nonzero
 * return halts the wait and is propagated to the caller. (See the
 * illustrative caller sketch after __wait_on_bit() below.)
 */
int __sched
__wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
	      wait_bit_action_f *action, unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
			ret = (*action)(&wbq_entry->key, mode);
	} while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);

	finish_wait(wq_head, &wbq_entry->wq_entry);

	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);
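
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * waits for a flag bit to clear using the bit_wait() action defined later
 * in this file. my_flags and MY_FLAG_BIT are hypothetical names:
 *
 *	unsigned long my_flags;
 *
 *	// Sleep uninterruptibly until MY_FLAG_BIT is cleared elsewhere.
 *	out_of_line_wait_on_bit(&my_flags, MY_FLAG_BIT, bit_wait,
 *				TASK_UNINTERRUPTIBLE);
 *
 * Most callers reach this through the wait_on_bit() wrappers in
 * <linux/wait_bit.h>, which take this out-of-line path only when the bit
 * is actually set.
 */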

int __sched out_of_line_wait_on_bit(void *word, int bit,
				    wait_bit_action_f *action, unsigned mode)
{
	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wq_entry, word, bit);

	return __wait_on_bit(wq_head, &wq_entry, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

int __sched out_of_line_wait_on_bit_timeout(
	void *word, int bit, wait_bit_action_f *action,
	unsigned mode, unsigned long timeout)
{
	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wq_entry, word, bit);

	wq_entry.key.timeout = jiffies + timeout;

	return __wait_on_bit(wq_head, &wq_entry, action, mode);
}
EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);

int __sched
__wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
		   wait_bit_action_f *action, unsigned mode)
{
	int ret = 0;

	for (;;) {
		prepare_to_wait_exclusive(wq_head, &wbq_entry->wq_entry, mode);
		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
			ret = action(&wbq_entry->key, mode);
			/*
			 * See the comment in prepare_to_wait_event().
			 * finish_wait() does not necessarily take wq_head->lock,
			 * but test_and_set_bit() implies mb() which pairs with
			 * smp_mb__after_atomic() before wake_up_page().
			 */
			if (ret)
				finish_wait(wq_head, &wbq_entry->wq_entry);
		}
		if (!test_and_set_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
			if (!ret)
				finish_wait(wq_head, &wbq_entry->wq_entry);
			return 0;
		} else if (ret) {
			return ret;
		}
	}
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
					 wait_bit_action_f *action, unsigned mode)
{
	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wq_entry, word, bit);

	return __wait_on_bit_lock(wq_head, &wq_entry, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
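
/*
 * Illustrative sketch (not part of the original file): a bit used as a
 * simple lock. my_flags and MY_LOCK_BIT are hypothetical names; the
 * unlock side must clear the bit, order the clear, and then wake waiters:
 *
 *	// Lock: sleeps via bit_wait() until the bit could be acquired.
 *	out_of_line_wait_on_bit_lock(&my_flags, MY_LOCK_BIT, bit_wait,
 *				     TASK_UNINTERRUPTIBLE);
 *
 *	// Unlock:
 *	clear_bit_unlock(MY_LOCK_BIT, &my_flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&my_flags, MY_LOCK_BIT);
 */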

void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

	if (waitqueue_active(wq_head))
		__wake_up(wq_head, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_atomic(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
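
/*
 * Illustrative sketch (not part of the original file): the waker-side
 * pattern the comment above describes, with hypothetical names
 * my_flags/MY_FLAG_BIT:
 *
 *	clear_bit(MY_FLAG_BIT, &my_flags);
 *	smp_mb__after_atomic();	// order the clear before waitqueue_active()
 *	wake_up_bit(&my_flags, MY_FLAG_BIT);
 */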

wait_queue_head_t *__var_waitqueue(void *p)
{
	if (BITS_PER_LONG == 64) {
		unsigned long q = (unsigned long)p;

		return bit_waitqueue((void *)(q & ~1), q & 1);
	}
	return bit_waitqueue(p, 0);
}
EXPORT_SYMBOL(__var_waitqueue);

static int
var_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
		  int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue_entry *wbq_entry =
		container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);

	if (wbq_entry->key.flags != key->flags ||
	    wbq_entry->key.bit_nr != key->bit_nr)
		return 0;

	return autoremove_wake_function(wq_entry, mode, sync, key);
}

void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags)
{
	*wbq_entry = (struct wait_bit_queue_entry){
		.key = {
			.flags	= (var),
			.bit_nr	= -1,
		},
		.wq_entry = {
			.private = current,
			.func	 = var_wake_function,
			.entry	 = LIST_HEAD_INIT(wbq_entry->wq_entry.entry),
		},
	};
}
EXPORT_SYMBOL(init_wait_var_entry);

void wake_up_var(void *var)
{
	__wake_up_bit(__var_waitqueue(var), var, -1);
}
EXPORT_SYMBOL(wake_up_var);
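
/*
 * Illustrative sketch (not part of the original file): the
 * wait_var_event() macro family in <linux/wait_bit.h> drives this
 * machinery. With a hypothetical reference count:
 *
 *	atomic_t my_refs;
 *
 *	// Waiter: sleep until the condition becomes true.
 *	wait_var_event(&my_refs, atomic_read(&my_refs) == 0);
 *
 *	// Waker: change the variable, then wake anyone waiting on it.
 *	if (atomic_dec_and_test(&my_refs))
 *		wake_up_var(&my_refs);
 */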

/*
 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
 * index (we're keying off bit -1, but that would produce a horrible hash
 * value).
 */
static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
{
	if (BITS_PER_LONG == 64) {
		unsigned long q = (unsigned long)p;

		return bit_waitqueue((void *)(q & ~1), q & 1);
	}
	return bit_waitqueue(p, 0);
}

static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync,
				  void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
	atomic_t *val = key->flags;

	if (wait_bit->key.flags != key->flags ||
	    wait_bit->key.bit_nr != key->bit_nr ||
	    atomic_read(val) != 0)
		return 0;

	return autoremove_wake_function(wq_entry, mode, sync, key);
}

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the action functions passed to __wait_on_atomic_t() are
 * permitted to return error codes: a nonzero return halts the wait and is
 * propagated to the caller.
 */
static __sched
int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
		       wait_atomic_t_action_f action, unsigned int mode)
{
	atomic_t *val;
	int ret = 0;

	do {
		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
		val = wbq_entry->key.flags;
		if (atomic_read(val) == 0)
			break;
		ret = (*action)(val, mode);
	} while (!ret && atomic_read(val) != 0);

	finish_wait(wq_head, &wbq_entry->wq_entry);

	return ret;
}

#define DEFINE_WAIT_ATOMIC_T(name, p)					\
	struct wait_bit_queue_entry name = {				\
		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
		.wq_entry = {						\
			.private = current,				\
			.func	 = wake_atomic_t_function,		\
			.entry	 =					\
				LIST_HEAD_INIT((name).wq_entry.entry),	\
		},							\
	}

__sched int out_of_line_wait_on_atomic_t(atomic_t *p,
					 wait_atomic_t_action_f action,
					 unsigned int mode)
{
	struct wait_queue_head *wq_head = atomic_t_waitqueue(p);
	DEFINE_WAIT_ATOMIC_T(wq_entry, p);

	return __wait_on_atomic_t(wq_head, &wq_entry, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);

__sched int atomic_t_wait(atomic_t *counter, unsigned int mode)
{
	schedule();
	if (signal_pending_state(mode, current))
		return -EINTR;

	return 0;
}
EXPORT_SYMBOL(atomic_t_wait);

/**
 * wake_up_atomic_t - wake up a waiter on an atomic_t
 * @p: The atomic_t being waited on, a kernel virtual address
 *
 * Wake up anyone waiting for the atomic_t to go to zero.
 *
 * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
 * check is done by the waiter's wake function, not by the waker itself).
 */
void wake_up_atomic_t(atomic_t *p)
{
	__wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
}
EXPORT_SYMBOL(wake_up_atomic_t);
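
/*
 * Illustrative sketch (not part of the original file): waiting for a
 * hypothetical in-flight counter to drain to zero, using the
 * atomic_t_wait() action defined above:
 *
 *	atomic_t my_inflight;
 *
 *	// Waiter:
 *	out_of_line_wait_on_atomic_t(&my_inflight, atomic_t_wait,
 *				     TASK_UNINTERRUPTIBLE);
 *
 *	// Waker: drop the last reference, then wake the waiters.
 *	if (atomic_dec_and_test(&my_inflight))
 *		wake_up_atomic_t(&my_inflight);
 */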

__sched int bit_wait(struct wait_bit_key *word, int mode)
{
	schedule();
	if (signal_pending_state(mode, current))
		return -EINTR;

	return 0;
}
EXPORT_SYMBOL(bit_wait);

__sched int bit_wait_io(struct wait_bit_key *word, int mode)
{
	io_schedule();
	if (signal_pending_state(mode, current))
		return -EINTR;

	return 0;
}
EXPORT_SYMBOL(bit_wait_io);

__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
{
	unsigned long now = READ_ONCE(jiffies);

	if (time_after_eq(now, word->timeout))
		return -EAGAIN;
	schedule_timeout(word->timeout - now);
	if (signal_pending_state(mode, current))
		return -EINTR;

	return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_timeout);

__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
{
	unsigned long now = READ_ONCE(jiffies);

	if (time_after_eq(now, word->timeout))
		return -EAGAIN;
	io_schedule_timeout(word->timeout - now);
	if (signal_pending_state(mode, current))
		return -EINTR;

	return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
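
/*
 * Illustrative sketch (not part of the original file): the timeout actions
 * above pair with out_of_line_wait_on_bit_timeout(), which stashes the
 * deadline in the wait key. Hypothetical names my_flags/MY_FLAG_BIT:
 *
 *	// Wait at most 5 seconds for the bit to clear: returns -EAGAIN on
 *	// timeout, -EINTR if interrupted by a signal, 0 on success.
 *	int err = out_of_line_wait_on_bit_timeout(&my_flags, MY_FLAG_BIT,
 *						  bit_wait_timeout,
 *						  TASK_INTERRUPTIBLE, 5 * HZ);
 */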

void __init wait_bit_init(void)
{
	int i;

	for (i = 0; i < WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(bit_wait_table + i);
}