/* include/linux/wait.h */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct wait_queue_entry wait_queue_entry_t;

/*
 * Wake-up callback invoked for each entry on a wait queue; returns nonzero
 * when the entry was actually woken (semantics defined by the scheduler's
 * default_wake_function — declared below, defined elsewhere).
 */
typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* entry participates in wake-one semantics */
#define WQ_FLAG_WOKEN		0x02	/* NOTE(review): presumably set when woken; user not visible in this chunk */
/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;		/* WQ_FLAG_* bits */
	void			*private;	/* opaque wakeup target; set to a task_struct by init_waitqueue_entry() */
	wait_queue_func_t	func;		/* wake-up callback */
	struct list_head	task_list;	/* link on wait_queue_head::task_list */
};
/* Key identifying which bit (or atomic_t) a bit-waiter is blocked on. */
struct wait_bit_key {
	void		*flags;		/* word containing the bit being waited on */
	int		bit_nr;		/* bit number within *flags, or WAIT_ATOMIC_T_BIT_NR */
#define WAIT_ATOMIC_T_BIT_NR	-1	/* sentinel: waiting on a whole atomic_t, not a single bit */
	unsigned long	timeout;	/* deadline for timed bit-waits (see out_of_line_wait_on_bit_timeout) */
};
/* A wait-queue entry specialized for bit-waiting: the generic entry plus its key. */
struct wait_bit_queue_entry {
	struct wait_bit_key	key;
	struct wait_queue_entry	wq_entry;
};
/* Head of a wait queue: a spinlock-protected list of wait_queue_entry. */
struct wait_queue_head {
	spinlock_t		lock;		/* protects task_list */
	struct list_head	task_list;	/* list of wait_queue_entry::task_list */
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;
/*
 * Macros for declaration and initialisaton of the datatypes
 */

/* Static initializer for a wait_queue_entry waking task @tsk. */
#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a wait_queue_head: unlocked, list pointing at itself (empty). */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)					\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)					\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Runtime initializer. The function-local static lock_class_key gives every
 * init_waitqueue_head() call site its own lockdep class; #wq_head stringifies
 * the argument as the lockdep name.
 */
#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
/* On-stack heads need runtime init under lockdep so the key is registered. */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
  75. static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
  76. {
  77. wq_entry->flags = 0;
  78. wq_entry->private = p;
  79. wq_entry->func = default_wake_function;
  80. }
  81. static inline void
  82. init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
  83. {
  84. wq_entry->flags = 0;
  85. wq_entry->private = NULL;
  86. wq_entry->func = func;
  87. }
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq))         if (@cond)
 *        wake_up(wq);                      break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	return !list_empty(&wq_head->task_list);
}
/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
/* Locked variants (take wq_head->lock internally); defined elsewhere. */
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

/* Add at the head of the queue; caller must hold wq_head->lock. */
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add(&wq_entry->task_list, &wq_head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}
/* Add at the tail of the queue; caller must hold wq_head->lock. */
static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->task_list, &wq_head->task_list);
}

/* Tail insertion with wake-one semantics; caller must hold wq_head->lock. */
static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}

/* Unlink the entry; caller must hold wq_head->lock. */
static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->task_list);
}
/* Action callback for bit-waiters: sleeps (per @mode) until the bit clears. */
typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);

/* Core wake-up and bit-wait primitives; defined elsewhere. */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
void wake_up_bit(void *word, int bit);
void wake_up_atomic_t(atomic_t *p);
int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
int out_of_line_wait_on_atomic_t(atomic_t *p, int (*)(atomic_t *), unsigned int mode);
struct wait_queue_head *bit_waitqueue(void *word, int bit);
/*
 * Wake-up front-ends: nr == 1 wakes one exclusive waiter, nr == 0 wakes all.
 * *_locked variants assume the caller already holds wq->lock.
 */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 * @m is passed through as the @key argument to the wake functions.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
  209. #define ___wait_cond_timeout(condition) \
  210. ({ \
  211. bool __cond = (condition); \
  212. if (__cond && !__ret) \
  213. __ret = 1; \
  214. __cond || !__ret; \
  215. })
  216. #define ___wait_is_interruptible(state) \
  217. (!__builtin_constant_p(state) || \
  218. state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
  219. extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */
#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	struct wait_queue_entry __wq_entry;				\
	long __ret = ret;	/* explicit shadow */			\
									\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wq_entry, state);\
									\
		if (condition)						\
			break;						\
									\
		/* signal pending in an interruptible sleep: bail out	\
		 * with -ERESTARTSYS/-EINTR from prepare_to_wait_event,	\
		 * skipping finish_wait (prepare already dequeued us) */\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wq_entry);					\
__out:	__ret;								\
})
#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)	/* fast path: avoid queueing entirely */	\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
#define __io_wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 * (accounts the sleep as I/O wait)
 */
#define io_wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__io_wait_event(wq, condition);					\
} while (0)
#define __wait_event_freezable(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns 0 when @condition became true, -ERESTARTSYS on signal.
 */
#define wait_event_freezable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable(wq, condition);		\
	__ret;								\
})
#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})
#define __wait_event_freezable_timeout(wq, condition, timeout)		\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable. Same return values as
 * wait_event_timeout(), plus -ERESTARTSYS on signal.
 */
#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_freezable_timeout(wq, condition, timeout); \
	__ret;								\
})
#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)

/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 * (the waiter is queued for wake-one semantics).
 */
#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)
#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})
#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})
/*
 * Core of the hrtimer-based waits: arm an on-stack hrtimer sleeper for
 * @timeout (KTIME_MAX means "no timeout"), then run the usual wait loop.
 * The sleeper clears __t.task when it fires, which the loop body turns
 * into -ETIME.
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout) != KTIME_MAX)					\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})
/* Exclusive (wake-one) variants of the interruptible/killable/freezable waits.
 * All return 0 when @condition became true, -ERESTARTSYS on signal. */

#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})

#define __wait_event_killable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,		\
		      schedule())

#define wait_event_killable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable_exclusive(wq, condition);	\
	__ret;								\
})

#define __wait_event_freezable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule(); try_to_freeze())

#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable_exclusive(wq, condition);\
	__ret;								\
})
/* Helpers that drop/retake wq->lock around one sleep; defined elsewhere. */
extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

/*
 * Common body for the *_locked waits: caller holds wq.lock; @fn sleeps once
 * with the lock dropped and returns nonzero (e.g. -ERESTARTSYS) to abort.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)	\
({									\
	int __ret;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		__ret = fn(&(wq), &__wait);				\
		if (__ret)						\
			break;						\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})

/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if it is woken by a wake-one wakeup, further waiters on the
 * list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
  659. /**
  660. * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
  661. * @wq: the waitqueue to wait on
  662. * @condition: a C expression for the event to wait for
  663. *
  664. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  665. * @condition evaluates to true or a signal is received.
  666. * The @condition is checked each time the waitqueue @wq is woken up.
  667. *
  668. * It must be called with wq.lock being held. This spinlock is
  669. * unlocked while sleeping but @condition testing is done while lock
  670. * is held and when this macro exits the lock is held.
  671. *
  672. * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
  673. * functions which must match the way they are locked/unlocked outside
  674. * of this macro.
  675. *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so while other processes also wait on the list, once this
 * process is awakened no further waiters are considered.
  679. *
  680. * wake_up_locked() has to be called after changing any variable that could
  681. * change the result of the wait condition.
  682. *
  683. * The function will return -ERESTARTSYS if it was interrupted by a
  684. * signal and 0 if @condition evaluated to true.
  685. */
/* As above, but wq.lock is dropped/retaken with spin_{un,}lock_irq(). */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
/* Sleep in TASK_KILLABLE until @condition is true; no timeout, non-exclusive. */
#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
  691. /**
  692. * wait_event_killable - sleep until a condition gets true
  693. * @wq: the waitqueue to wait on
  694. * @condition: a C expression for the event to wait for
  695. *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
  705. */
/* Fast path checks @condition before taking the slow __wait_event_killable() path. */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})
/*
 * Uninterruptible wait entered with @lock held: the lock is dropped
 * around @cmd and schedule(), and reacquired before @condition is
 * rechecked by ___wait_event().  The (void) cast discards the
 * statement-expression value since there is nothing to report.
 */
#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))
  720. /**
  721. * wait_event_lock_irq_cmd - sleep until a condition gets true. The
  722. * condition is checked under the lock. This
  723. * is expected to be called with the lock
  724. * taken.
  725. * @wq: the waitqueue to wait on
  726. * @condition: a C expression for the event to wait for
  727. * @lock: a locked spinlock_t, which will be released before cmd
  728. * and schedule() and reacquired afterwards.
  729. * @cmd: a command which is invoked outside the critical section before
  730. * sleep
  731. *
  732. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  733. * @condition evaluates to true. The @condition is checked each time
  734. * the waitqueue @wq is woken up.
  735. *
  736. * wake_up() has to be called after changing any variable that could
  737. * change the result of the wait condition.
  738. *
  739. * This is supposed to be called while holding the lock. The lock is
  740. * dropped before invoking the cmd and going to sleep and is reacquired
  741. * afterwards.
  742. */
  743. #define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
  744. do { \
  745. if (condition) \
  746. break; \
  747. __wait_event_lock_irq(wq, condition, lock, cmd); \
  748. } while (0)
  749. /**
  750. * wait_event_lock_irq - sleep until a condition gets true. The
  751. * condition is checked under the lock. This
  752. * is expected to be called with the lock
  753. * taken.
  754. * @wq: the waitqueue to wait on
  755. * @condition: a C expression for the event to wait for
  756. * @lock: a locked spinlock_t, which will be released before schedule()
  757. * and reacquired afterwards.
  758. *
  759. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  760. * @condition evaluates to true. The @condition is checked each time
  761. * the waitqueue @wq is woken up.
  762. *
  763. * wake_up() has to be called after changing any variable that could
  764. * change the result of the wait condition.
  765. *
  766. * This is supposed to be called while holding the lock. The lock is
  767. * dropped before going to sleep and is reacquired afterwards.
  768. */
  769. #define wait_event_lock_irq(wq, condition, lock) \
  770. do { \
  771. if (condition) \
  772. break; \
  773. __wait_event_lock_irq(wq, condition, lock, ); \
  774. } while (0)
/*
 * Interruptible variant of __wait_event_lock_irq(): sleeps in
 * TASK_INTERRUPTIBLE and evaluates to 0 or -ERESTARTSYS (the
 * statement-expression value produced by ___wait_event()).
 */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))
  781. /**
  782. * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
  783. * The condition is checked under the lock. This is expected to
  784. * be called with the lock taken.
  785. * @wq: the waitqueue to wait on
  786. * @condition: a C expression for the event to wait for
  787. * @lock: a locked spinlock_t, which will be released before cmd and
  788. * schedule() and reacquired afterwards.
  789. * @cmd: a command which is invoked outside the critical section before
  790. * sleep
  791. *
  792. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  793. * @condition evaluates to true or a signal is received. The @condition is
  794. * checked each time the waitqueue @wq is woken up.
  795. *
  796. * wake_up() has to be called after changing any variable that could
  797. * change the result of the wait condition.
  798. *
  799. * This is supposed to be called while holding the lock. The lock is
  800. * dropped before invoking the cmd and going to sleep and is reacquired
  801. * afterwards.
  802. *
  803. * The macro will return -ERESTARTSYS if it was interrupted by a signal
  804. * and 0 if @condition evaluated to true.
  805. */
/* Only blocks (and only runs @cmd) when @condition is not yet true. */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})
  814. /**
  815. * wait_event_interruptible_lock_irq - sleep until a condition gets true.
  816. * The condition is checked under the lock. This is expected
  817. * to be called with the lock taken.
  818. * @wq: the waitqueue to wait on
  819. * @condition: a C expression for the event to wait for
  820. * @lock: a locked spinlock_t, which will be released before schedule()
  821. * and reacquired afterwards.
  822. *
  823. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  824. * @condition evaluates to true or signal is received. The @condition is
  825. * checked each time the waitqueue @wq is woken up.
  826. *
  827. * wake_up() has to be called after changing any variable that could
  828. * change the result of the wait condition.
  829. *
  830. * This is supposed to be called while holding the lock. The lock is
  831. * dropped before going to sleep and is reacquired afterwards.
  832. *
  833. * The macro will return -ERESTARTSYS if it was interrupted by a signal
  834. * and 0 if @condition evaluated to true.
  835. */
/* Same as the _cmd variant with an empty trailing @cmd argument. */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})
  844. #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
  845. lock, timeout) \
  846. ___wait_event(wq, ___wait_cond_timeout(condition), \
  847. TASK_INTERRUPTIBLE, 0, timeout, \
  848. spin_unlock_irq(&lock); \
  849. __ret = schedule_timeout(__ret); \
  850. spin_lock_irq(&lock));
  851. /**
  852. * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
  853. * true or a timeout elapses. The condition is checked under
  854. * the lock. This is expected to be called with the lock taken.
  855. * @wq: the waitqueue to wait on
  856. * @condition: a C expression for the event to wait for
  857. * @lock: a locked spinlock_t, which will be released before schedule()
  858. * and reacquired afterwards.
  859. * @timeout: timeout, in jiffies
  860. *
  861. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  862. * @condition evaluates to true or signal is received. The @condition is
  863. * checked each time the waitqueue @wq is woken up.
  864. *
  865. * wake_up() has to be called after changing any variable that could
  866. * change the result of the wait condition.
  867. *
  868. * This is supposed to be called while holding the lock. The lock is
  869. * dropped before going to sleep and is reacquired afterwards.
  870. *
  871. * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
  872. * was interrupted by a signal, and the remaining jiffies otherwise
  873. * if the condition evaluated to true before the timeout elapsed.
  874. */
/*
 * Returns @timeout unchanged when the condition is already satisfied
 * (___wait_cond_timeout() also treats @timeout == 0 as expired).
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
/* Queue @wq_entry on @wq_head (non-exclusive) and set the task state. */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
/* As prepare_to_wait(), but queued exclusively (woken one at a time). */
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
/* Used by ___wait_event(); may return -ERESTARTSYS for signal states. */
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
/* Restore TASK_RUNNING and dequeue @wq_entry if still queued. */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
/* Default wake function for DEFINE_WAIT(): dequeues the entry on wakeup. */
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
/*
 * Declare an on-stack wait entry for the current task; @function is
 * invoked at wakeup time and may dequeue the entry itself.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	struct wait_queue_entry name = {				\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

/* Common case: auto-remove the entry from the queue when woken. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
/*
 * Declare an on-stack wait-bit entry keyed on (@word, @bit); used with
 * the hashed bit-wait table and woken via wake_bit_function().
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue_entry name = {				\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wq_entry = {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wq_entry.task_list), \
		},							\
	}
/*
 * Runtime equivalent of DEFINE_WAIT(): (re)initialize an existing
 * wait_queue_entry for the current task with the autoremove wake function.
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
/* Canned sleep actions (wait_bit_action_f) for the wait_on_bit*() helpers. */
extern int bit_wait(struct wait_bit_key *key, int bit);
extern int bit_wait_io(struct wait_bit_key *key, int bit);
extern int bit_wait_timeout(struct wait_bit_key *key, int bit);
extern int bit_wait_io_timeout(struct wait_bit_key *key, int bit);
  923. /**
  924. * wait_on_bit - wait for a bit to be cleared
  925. * @word: the word being waited on, a kernel virtual address
  926. * @bit: the bit of the word being waited on
  927. * @mode: the task state to sleep in
  928. *
  929. * There is a standard hashed waitqueue table for generic use. This
  930. * is the part of the hashtable's accessor API that waits on a bit.
  931. * For instance, if one were to have waiters on a bitflag, one would
  932. * call wait_on_bit() in threads waiting for the bit to clear.
  933. * One uses wait_on_bit() where one is waiting for the bit to clear,
  934. * but has no intention of setting it.
  935. * Returned value will be zero if the bit was cleared, or non-zero
  936. * if the process received a signal and the mode permitted wakeup
  937. * on that signal.
  938. */
  939. static inline int
  940. wait_on_bit(unsigned long *word, int bit, unsigned mode)
  941. {
  942. might_sleep();
  943. if (!test_bit(bit, word))
  944. return 0;
  945. return out_of_line_wait_on_bit(word, bit,
  946. bit_wait,
  947. mode);
  948. }
  949. /**
  950. * wait_on_bit_io - wait for a bit to be cleared
  951. * @word: the word being waited on, a kernel virtual address
  952. * @bit: the bit of the word being waited on
  953. * @mode: the task state to sleep in
  954. *
  955. * Use the standard hashed waitqueue table to wait for a bit
  956. * to be cleared. This is similar to wait_on_bit(), but calls
  957. * io_schedule() instead of schedule() for the actual waiting.
  958. *
  959. * Returned value will be zero if the bit was cleared, or non-zero
  960. * if the process received a signal and the mode permitted wakeup
  961. * on that signal.
  962. */
  963. static inline int
  964. wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
  965. {
  966. might_sleep();
  967. if (!test_bit(bit, word))
  968. return 0;
  969. return out_of_line_wait_on_bit(word, bit,
  970. bit_wait_io,
  971. mode);
  972. }
  973. /**
  974. * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
  975. * @word: the word being waited on, a kernel virtual address
  976. * @bit: the bit of the word being waited on
  977. * @mode: the task state to sleep in
  978. * @timeout: timeout, in jiffies
  979. *
  980. * Use the standard hashed waitqueue table to wait for a bit
  981. * to be cleared. This is similar to wait_on_bit(), except also takes a
  982. * timeout parameter.
  983. *
  984. * Returned value will be zero if the bit was cleared before the
  985. * @timeout elapsed, or non-zero if the @timeout elapsed or process
  986. * received a signal and the mode permitted wakeup on that signal.
  987. */
  988. static inline int
  989. wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
  990. unsigned long timeout)
  991. {
  992. might_sleep();
  993. if (!test_bit(bit, word))
  994. return 0;
  995. return out_of_line_wait_on_bit_timeout(word, bit,
  996. bit_wait_timeout,
  997. mode, timeout);
  998. }
  999. /**
  1000. * wait_on_bit_action - wait for a bit to be cleared
  1001. * @word: the word being waited on, a kernel virtual address
  1002. * @bit: the bit of the word being waited on
  1003. * @action: the function used to sleep, which may take special actions
  1004. * @mode: the task state to sleep in
  1005. *
  1006. * Use the standard hashed waitqueue table to wait for a bit
  1007. * to be cleared, and allow the waiting action to be specified.
  1008. * This is like wait_on_bit() but allows fine control of how the waiting
  1009. * is done.
  1010. *
  1011. * Returned value will be zero if the bit was cleared, or non-zero
  1012. * if the process received a signal and the mode permitted wakeup
  1013. * on that signal.
  1014. */
  1015. static inline int
  1016. wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
  1017. unsigned mode)
  1018. {
  1019. might_sleep();
  1020. if (!test_bit(bit, word))
  1021. return 0;
  1022. return out_of_line_wait_on_bit(word, bit, action, mode);
  1023. }
  1024. /**
  1025. * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
  1026. * @word: the word being waited on, a kernel virtual address
  1027. * @bit: the bit of the word being waited on
  1028. * @mode: the task state to sleep in
  1029. *
  1030. * There is a standard hashed waitqueue table for generic use. This
  1031. * is the part of the hashtable's accessor API that waits on a bit
  1032. * when one intends to set it, for instance, trying to lock bitflags.
  1033. * For instance, if one were to have waiters trying to set bitflag
  1034. * and waiting for it to clear before setting it, one would call
  1035. * wait_on_bit() in threads waiting to be able to set the bit.
  1036. * One uses wait_on_bit_lock() where one is waiting for the bit to
  1037. * clear with the intention of setting it, and when done, clearing it.
  1038. *
  1039. * Returns zero if the bit was (eventually) found to be clear and was
  1040. * set. Returns non-zero if a signal was delivered to the process and
  1041. * the @mode allows that signal to wake the process.
  1042. */
  1043. static inline int
  1044. wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
  1045. {
  1046. might_sleep();
  1047. if (!test_and_set_bit(bit, word))
  1048. return 0;
  1049. return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
  1050. }
  1051. /**
  1052. * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
  1053. * @word: the word being waited on, a kernel virtual address
  1054. * @bit: the bit of the word being waited on
  1055. * @mode: the task state to sleep in
  1056. *
  1057. * Use the standard hashed waitqueue table to wait for a bit
  1058. * to be cleared and then to atomically set it. This is similar
  1059. * to wait_on_bit(), but calls io_schedule() instead of schedule()
  1060. * for the actual waiting.
  1061. *
  1062. * Returns zero if the bit was (eventually) found to be clear and was
  1063. * set. Returns non-zero if a signal was delivered to the process and
  1064. * the @mode allows that signal to wake the process.
  1065. */
  1066. static inline int
  1067. wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
  1068. {
  1069. might_sleep();
  1070. if (!test_and_set_bit(bit, word))
  1071. return 0;
  1072. return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
  1073. }
  1074. /**
  1075. * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
  1076. * @word: the word being waited on, a kernel virtual address
  1077. * @bit: the bit of the word being waited on
  1078. * @action: the function used to sleep, which may take special actions
  1079. * @mode: the task state to sleep in
  1080. *
  1081. * Use the standard hashed waitqueue table to wait for a bit
  1082. * to be cleared and then to set it, and allow the waiting action
  1083. * to be specified.
  1084. * This is like wait_on_bit() but allows fine control of how the waiting
  1085. * is done.
  1086. *
  1087. * Returns zero if the bit was (eventually) found to be clear and was
  1088. * set. Returns non-zero if a signal was delivered to the process and
  1089. * the @mode allows that signal to wake the process.
  1090. */
  1091. static inline int
  1092. wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
  1093. unsigned mode)
  1094. {
  1095. might_sleep();
  1096. if (!test_and_set_bit(bit, word))
  1097. return 0;
  1098. return out_of_line_wait_on_bit_lock(word, bit, action, mode);
  1099. }
  1100. /**
  1101. * wait_on_atomic_t - Wait for an atomic_t to become 0
  1102. * @val: The atomic value being waited on, a kernel virtual address
  1103. * @action: the function used to sleep, which may take special actions
  1104. * @mode: the task state to sleep in
  1105. *
  1106. * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
  1107. * the purpose of getting a waitqueue, but we set the key to a bit number
  1108. * outside of the target 'word'.
  1109. */
  1110. static inline
  1111. int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
  1112. {
  1113. might_sleep();
  1114. if (atomic_read(val) == 0)
  1115. return 0;
  1116. return out_of_line_wait_on_atomic_t(val, action, mode);
  1117. }
  1118. #endif /* _LINUX_WAIT_H */