/* NOTE(review): removed extraction residue (page header "wait.h 35 KB" and a
 * concatenated run of rendered line numbers) that preceded the real file body. */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>
  12. typedef struct wait_queue_entry wait_queue_entry_t;
  13. typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
  14. int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
  15. /* wait_queue_entry::flags */
  16. #define WQ_FLAG_EXCLUSIVE 0x01
  17. #define WQ_FLAG_WOKEN 0x02
  18. #define WQ_FLAG_BOOKMARK 0x04
  19. /*
  20. * A single wait-queue entry structure:
  21. */
  22. struct wait_queue_entry {
  23. unsigned int flags;
  24. void *private;
  25. wait_queue_func_t func;
  26. struct list_head entry;
  27. };
  28. struct wait_queue_head {
  29. spinlock_t lock;
  30. struct list_head head;
  31. };
  32. typedef struct wait_queue_head wait_queue_head_t;
  33. struct task_struct;
  34. /*
  35. * Macros for declaration and initialisaton of the datatypes
  36. */
  37. #define __WAITQUEUE_INITIALIZER(name, tsk) { \
  38. .private = tsk, \
  39. .func = default_wake_function, \
  40. .entry = { NULL, NULL } }
  41. #define DECLARE_WAITQUEUE(name, tsk) \
  42. struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
  43. #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
  44. .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
  45. .head = { &(name).head, &(name).head } }
  46. #define DECLARE_WAIT_QUEUE_HEAD(name) \
  47. struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
  48. extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
  49. #define init_waitqueue_head(wq_head) \
  50. do { \
  51. static struct lock_class_key __key; \
  52. \
  53. __init_waitqueue_head((wq_head), #wq_head, &__key); \
  54. } while (0)
  55. #ifdef CONFIG_LOCKDEP
  56. # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
  57. ({ init_waitqueue_head(&name); name; })
  58. # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
  59. struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
  60. #else
  61. # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
  62. #endif
  63. static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
  64. {
  65. wq_entry->flags = 0;
  66. wq_entry->private = p;
  67. wq_entry->func = default_wake_function;
  68. }
  69. static inline void
  70. init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
  71. {
  72. wq_entry->flags = 0;
  73. wq_entry->private = NULL;
  74. wq_entry->func = func;
  75. }
  76. /**
  77. * waitqueue_active -- locklessly test for waiters on the queue
  78. * @wq_head: the waitqueue to test for waiters
  79. *
  80. * returns true if the wait list is not empty
  81. *
  82. * NOTE: this function is lockless and requires care, incorrect usage _will_
  83. * lead to sporadic and non-obvious failure.
  84. *
  85. * Use either while holding wait_queue_head::lock or when used for wakeups
  86. * with an extra smp_mb() like:
  87. *
  88. * CPU0 - waker CPU1 - waiter
  89. *
  90. * for (;;) {
  91. * @cond = true; prepare_to_wait(&wq_head, &wait, state);
  92. * smp_mb(); // smp_mb() from set_current_state()
  93. * if (waitqueue_active(wq_head)) if (@cond)
  94. * wake_up(wq_head); break;
  95. * schedule();
  96. * }
  97. * finish_wait(&wq_head, &wait);
  98. *
  99. * Because without the explicit smp_mb() it's possible for the
  100. * waitqueue_active() load to get hoisted over the @cond store such that we'll
  101. * observe an empty wait list while the waiter might not observe @cond.
  102. *
  103. * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
  104. * which (when the lock is uncontended) are of roughly equal cost.
  105. */
  106. static inline int waitqueue_active(struct wait_queue_head *wq_head)
  107. {
  108. return !list_empty(&wq_head->head);
  109. }
  110. /**
  111. * wq_has_sleeper - check if there are any waiting processes
  112. * @wq_head: wait queue head
  113. *
  114. * Returns true if wq_head has waiting processes
  115. *
  116. * Please refer to the comment for waitqueue_active.
  117. */
  118. static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
  119. {
  120. /*
  121. * We need to be sure we are in sync with the
  122. * add_wait_queue modifications to the wait queue.
  123. *
  124. * This memory barrier should be paired with one on the
  125. * waiting side.
  126. */
  127. smp_mb();
  128. return waitqueue_active(wq_head);
  129. }
  130. extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
  131. extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
  132. extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
  133. static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  134. {
  135. list_add(&wq_entry->entry, &wq_head->head);
  136. }
  137. /*
  138. * Used for wake-one threads:
  139. */
  140. static inline void
  141. __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  142. {
  143. wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
  144. __add_wait_queue(wq_head, wq_entry);
  145. }
  146. static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  147. {
  148. list_add_tail(&wq_entry->entry, &wq_head->head);
  149. }
  150. static inline void
  151. __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  152. {
  153. wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
  154. __add_wait_queue_entry_tail(wq_head, wq_entry);
  155. }
  156. static inline void
  157. __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  158. {
  159. list_del(&wq_entry->entry);
  160. }
  161. void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
  162. void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
  163. void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
  164. unsigned int mode, void *key, wait_queue_entry_t *bookmark);
  165. void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
  166. void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
  167. void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
  168. #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
  169. #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
  170. #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
  171. #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
  172. #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
  173. #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
  174. #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
  175. #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
  176. #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
  177. /*
  178. * Wakeup macros to be used to report events to the targets.
  179. */
  180. #define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
  181. #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
  182. #define wake_up_poll(x, m) \
  183. __wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
  184. #define wake_up_locked_poll(x, m) \
  185. __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
  186. #define wake_up_interruptible_poll(x, m) \
  187. __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
  188. #define wake_up_interruptible_sync_poll(x, m) \
  189. __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
  190. #define ___wait_cond_timeout(condition) \
  191. ({ \
  192. bool __cond = (condition); \
  193. if (__cond && !__ret) \
  194. __ret = 1; \
  195. __cond || !__ret; \
  196. })
  197. #define ___wait_is_interruptible(state) \
  198. (!__builtin_constant_p(state) || \
  199. state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
  200. extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
  201. /*
  202. * The below macro ___wait_event() has an explicit shadow of the __ret
  203. * variable when used from the wait_event_*() macros.
  204. *
  205. * This is so that both can use the ___wait_cond_timeout() construct
  206. * to wrap the condition.
  207. *
  208. * The type inconsistency of the wait_event_*() __ret variable is also
  209. * on purpose; we use long where we can return timeout values and int
  210. * otherwise.
  211. */
  212. #define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
  213. ({ \
  214. __label__ __out; \
  215. struct wait_queue_entry __wq_entry; \
  216. long __ret = ret; /* explicit shadow */ \
  217. \
  218. init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
  219. for (;;) { \
  220. long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
  221. \
  222. if (condition) \
  223. break; \
  224. \
  225. if (___wait_is_interruptible(state) && __int) { \
  226. __ret = __int; \
  227. goto __out; \
  228. } \
  229. \
  230. cmd; \
  231. } \
  232. finish_wait(&wq_head, &__wq_entry); \
  233. __out: __ret; \
  234. })
  235. #define __wait_event(wq_head, condition) \
  236. (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  237. schedule())
  238. /**
  239. * wait_event - sleep until a condition gets true
  240. * @wq_head: the waitqueue to wait on
  241. * @condition: a C expression for the event to wait for
  242. *
  243. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  244. * @condition evaluates to true. The @condition is checked each time
  245. * the waitqueue @wq_head is woken up.
  246. *
  247. * wake_up() has to be called after changing any variable that could
  248. * change the result of the wait condition.
  249. */
  250. #define wait_event(wq_head, condition) \
  251. do { \
  252. might_sleep(); \
  253. if (condition) \
  254. break; \
  255. __wait_event(wq_head, condition); \
  256. } while (0)
  257. #define __io_wait_event(wq_head, condition) \
  258. (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  259. io_schedule())
  260. /*
  261. * io_wait_event() -- like wait_event() but with io_schedule()
  262. */
  263. #define io_wait_event(wq_head, condition) \
  264. do { \
  265. might_sleep(); \
  266. if (condition) \
  267. break; \
  268. __io_wait_event(wq_head, condition); \
  269. } while (0)
  270. #define __wait_event_freezable(wq_head, condition) \
  271. ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
  272. schedule(); try_to_freeze())
  273. /**
  274. * wait_event_freezable - sleep (or freeze) until a condition gets true
  275. * @wq_head: the waitqueue to wait on
  276. * @condition: a C expression for the event to wait for
  277. *
  278. * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
  279. * to system load) until the @condition evaluates to true. The
  280. * @condition is checked each time the waitqueue @wq_head is woken up.
  281. *
  282. * wake_up() has to be called after changing any variable that could
  283. * change the result of the wait condition.
  284. */
  285. #define wait_event_freezable(wq_head, condition) \
  286. ({ \
  287. int __ret = 0; \
  288. might_sleep(); \
  289. if (!(condition)) \
  290. __ret = __wait_event_freezable(wq_head, condition); \
  291. __ret; \
  292. })
  293. #define __wait_event_timeout(wq_head, condition, timeout) \
  294. ___wait_event(wq_head, ___wait_cond_timeout(condition), \
  295. TASK_UNINTERRUPTIBLE, 0, timeout, \
  296. __ret = schedule_timeout(__ret))
  297. /**
  298. * wait_event_timeout - sleep until a condition gets true or a timeout elapses
  299. * @wq_head: the waitqueue to wait on
  300. * @condition: a C expression for the event to wait for
  301. * @timeout: timeout, in jiffies
  302. *
  303. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  304. * @condition evaluates to true. The @condition is checked each time
  305. * the waitqueue @wq_head is woken up.
  306. *
  307. * wake_up() has to be called after changing any variable that could
  308. * change the result of the wait condition.
  309. *
  310. * Returns:
  311. * 0 if the @condition evaluated to %false after the @timeout elapsed,
  312. * 1 if the @condition evaluated to %true after the @timeout elapsed,
  313. * or the remaining jiffies (at least 1) if the @condition evaluated
  314. * to %true before the @timeout elapsed.
  315. */
  316. #define wait_event_timeout(wq_head, condition, timeout) \
  317. ({ \
  318. long __ret = timeout; \
  319. might_sleep(); \
  320. if (!___wait_cond_timeout(condition)) \
  321. __ret = __wait_event_timeout(wq_head, condition, timeout); \
  322. __ret; \
  323. })
  324. #define __wait_event_freezable_timeout(wq_head, condition, timeout) \
  325. ___wait_event(wq_head, ___wait_cond_timeout(condition), \
  326. TASK_INTERRUPTIBLE, 0, timeout, \
  327. __ret = schedule_timeout(__ret); try_to_freeze())
  328. /*
  329. * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
  330. * increasing load and is freezable.
  331. */
  332. #define wait_event_freezable_timeout(wq_head, condition, timeout) \
  333. ({ \
  334. long __ret = timeout; \
  335. might_sleep(); \
  336. if (!___wait_cond_timeout(condition)) \
  337. __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
  338. __ret; \
  339. })
  340. #define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
  341. (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
  342. cmd1; schedule(); cmd2)
  343. /*
  344. * Just like wait_event_cmd(), except it sets exclusive flag
  345. */
  346. #define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
  347. do { \
  348. if (condition) \
  349. break; \
  350. __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \
  351. } while (0)
  352. #define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \
  353. (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  354. cmd1; schedule(); cmd2)
  355. /**
  356. * wait_event_cmd - sleep until a condition gets true
  357. * @wq_head: the waitqueue to wait on
  358. * @condition: a C expression for the event to wait for
  359. * @cmd1: the command will be executed before sleep
  360. * @cmd2: the command will be executed after sleep
  361. *
  362. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  363. * @condition evaluates to true. The @condition is checked each time
  364. * the waitqueue @wq_head is woken up.
  365. *
  366. * wake_up() has to be called after changing any variable that could
  367. * change the result of the wait condition.
  368. */
  369. #define wait_event_cmd(wq_head, condition, cmd1, cmd2) \
  370. do { \
  371. if (condition) \
  372. break; \
  373. __wait_event_cmd(wq_head, condition, cmd1, cmd2); \
  374. } while (0)
  375. #define __wait_event_interruptible(wq_head, condition) \
  376. ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
  377. schedule())
  378. /**
  379. * wait_event_interruptible - sleep until a condition gets true
  380. * @wq_head: the waitqueue to wait on
  381. * @condition: a C expression for the event to wait for
  382. *
  383. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  384. * @condition evaluates to true or a signal is received.
  385. * The @condition is checked each time the waitqueue @wq_head is woken up.
  386. *
  387. * wake_up() has to be called after changing any variable that could
  388. * change the result of the wait condition.
  389. *
  390. * The function will return -ERESTARTSYS if it was interrupted by a
  391. * signal and 0 if @condition evaluated to true.
  392. */
  393. #define wait_event_interruptible(wq_head, condition) \
  394. ({ \
  395. int __ret = 0; \
  396. might_sleep(); \
  397. if (!(condition)) \
  398. __ret = __wait_event_interruptible(wq_head, condition); \
  399. __ret; \
  400. })
  401. #define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
  402. ___wait_event(wq_head, ___wait_cond_timeout(condition), \
  403. TASK_INTERRUPTIBLE, 0, timeout, \
  404. __ret = schedule_timeout(__ret))
  405. /**
  406. * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
  407. * @wq_head: the waitqueue to wait on
  408. * @condition: a C expression for the event to wait for
  409. * @timeout: timeout, in jiffies
  410. *
  411. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  412. * @condition evaluates to true or a signal is received.
  413. * The @condition is checked each time the waitqueue @wq_head is woken up.
  414. *
  415. * wake_up() has to be called after changing any variable that could
  416. * change the result of the wait condition.
  417. *
  418. * Returns:
  419. * 0 if the @condition evaluated to %false after the @timeout elapsed,
  420. * 1 if the @condition evaluated to %true after the @timeout elapsed,
  421. * the remaining jiffies (at least 1) if the @condition evaluated
  422. * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
  423. * interrupted by a signal.
  424. */
  425. #define wait_event_interruptible_timeout(wq_head, condition, timeout) \
  426. ({ \
  427. long __ret = timeout; \
  428. might_sleep(); \
  429. if (!___wait_cond_timeout(condition)) \
  430. __ret = __wait_event_interruptible_timeout(wq_head, \
  431. condition, timeout); \
  432. __ret; \
  433. })
  434. #define __wait_event_hrtimeout(wq_head, condition, timeout, state) \
  435. ({ \
  436. int __ret = 0; \
  437. struct hrtimer_sleeper __t; \
  438. \
  439. hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); \
  440. hrtimer_init_sleeper(&__t, current); \
  441. if ((timeout) != KTIME_MAX) \
  442. hrtimer_start_range_ns(&__t.timer, timeout, \
  443. current->timer_slack_ns, \
  444. HRTIMER_MODE_REL); \
  445. \
  446. __ret = ___wait_event(wq_head, condition, state, 0, 0, \
  447. if (!__t.task) { \
  448. __ret = -ETIME; \
  449. break; \
  450. } \
  451. schedule()); \
  452. \
  453. hrtimer_cancel(&__t.timer); \
  454. destroy_hrtimer_on_stack(&__t.timer); \
  455. __ret; \
  456. })
  457. /**
  458. * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
  459. * @wq_head: the waitqueue to wait on
  460. * @condition: a C expression for the event to wait for
  461. * @timeout: timeout, as a ktime_t
  462. *
  463. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  464. * @condition evaluates to true or a signal is received.
  465. * The @condition is checked each time the waitqueue @wq_head is woken up.
  466. *
  467. * wake_up() has to be called after changing any variable that could
  468. * change the result of the wait condition.
  469. *
  470. * The function returns 0 if @condition became true, or -ETIME if the timeout
  471. * elapsed.
  472. */
  473. #define wait_event_hrtimeout(wq_head, condition, timeout) \
  474. ({ \
  475. int __ret = 0; \
  476. might_sleep(); \
  477. if (!(condition)) \
  478. __ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
  479. TASK_UNINTERRUPTIBLE); \
  480. __ret; \
  481. })
  482. /**
  483. * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
  484. * @wq: the waitqueue to wait on
  485. * @condition: a C expression for the event to wait for
  486. * @timeout: timeout, as a ktime_t
  487. *
  488. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  489. * @condition evaluates to true or a signal is received.
  490. * The @condition is checked each time the waitqueue @wq is woken up.
  491. *
  492. * wake_up() has to be called after changing any variable that could
  493. * change the result of the wait condition.
  494. *
  495. * The function returns 0 if @condition became true, -ERESTARTSYS if it was
  496. * interrupted by a signal, or -ETIME if the timeout elapsed.
  497. */
  498. #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
  499. ({ \
  500. long __ret = 0; \
  501. might_sleep(); \
  502. if (!(condition)) \
  503. __ret = __wait_event_hrtimeout(wq, condition, timeout, \
  504. TASK_INTERRUPTIBLE); \
  505. __ret; \
  506. })
  507. #define __wait_event_interruptible_exclusive(wq, condition) \
  508. ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
  509. schedule())
  510. #define wait_event_interruptible_exclusive(wq, condition) \
  511. ({ \
  512. int __ret = 0; \
  513. might_sleep(); \
  514. if (!(condition)) \
  515. __ret = __wait_event_interruptible_exclusive(wq, condition); \
  516. __ret; \
  517. })
  518. #define __wait_event_killable_exclusive(wq, condition) \
  519. ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
  520. schedule())
  521. #define wait_event_killable_exclusive(wq, condition) \
  522. ({ \
  523. int __ret = 0; \
  524. might_sleep(); \
  525. if (!(condition)) \
  526. __ret = __wait_event_killable_exclusive(wq, condition); \
  527. __ret; \
  528. })
  529. #define __wait_event_freezable_exclusive(wq, condition) \
  530. ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
  531. schedule(); try_to_freeze())
  532. #define wait_event_freezable_exclusive(wq, condition) \
  533. ({ \
  534. int __ret = 0; \
  535. might_sleep(); \
  536. if (!(condition)) \
  537. __ret = __wait_event_freezable_exclusive(wq, condition); \
  538. __ret; \
  539. })
  540. extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
  541. extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
  542. #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
  543. ({ \
  544. int __ret; \
  545. DEFINE_WAIT(__wait); \
  546. if (exclusive) \
  547. __wait.flags |= WQ_FLAG_EXCLUSIVE; \
  548. do { \
  549. __ret = fn(&(wq), &__wait); \
  550. if (__ret) \
  551. break; \
  552. } while (!(condition)); \
  553. __remove_wait_queue(&(wq), &__wait); \
  554. __set_current_state(TASK_RUNNING); \
  555. __ret; \
  556. })
  557. /**
  558. * wait_event_interruptible_locked - sleep until a condition gets true
  559. * @wq: the waitqueue to wait on
  560. * @condition: a C expression for the event to wait for
  561. *
  562. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  563. * @condition evaluates to true or a signal is received.
  564. * The @condition is checked each time the waitqueue @wq is woken up.
  565. *
  566. * It must be called with wq.lock being held. This spinlock is
  567. * unlocked while sleeping but @condition testing is done while lock
  568. * is held and when this macro exits the lock is held.
  569. *
  570. * The lock is locked/unlocked using spin_lock()/spin_unlock()
  571. * functions which must match the way they are locked/unlocked outside
  572. * of this macro.
  573. *
  574. * wake_up_locked() has to be called after changing any variable that could
  575. * change the result of the wait condition.
  576. *
  577. * The function will return -ERESTARTSYS if it was interrupted by a
  578. * signal and 0 if @condition evaluated to true.
  579. */
  580. #define wait_event_interruptible_locked(wq, condition) \
  581. ((condition) \
  582. ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
  583. /**
  584. * wait_event_interruptible_locked_irq - sleep until a condition gets true
  585. * @wq: the waitqueue to wait on
  586. * @condition: a C expression for the event to wait for
  587. *
  588. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  589. * @condition evaluates to true or a signal is received.
  590. * The @condition is checked each time the waitqueue @wq is woken up.
  591. *
  592. * It must be called with wq.lock being held. This spinlock is
  593. * unlocked while sleeping but @condition testing is done while lock
  594. * is held and when this macro exits the lock is held.
  595. *
  596. * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
  597. * functions which must match the way they are locked/unlocked outside
  598. * of this macro.
  599. *
  600. * wake_up_locked() has to be called after changing any variable that could
  601. * change the result of the wait condition.
  602. *
  603. * The function will return -ERESTARTSYS if it was interrupted by a
  604. * signal and 0 if @condition evaluated to true.
  605. */
  606. #define wait_event_interruptible_locked_irq(wq, condition) \
  607. ((condition) \
  608. ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
  609. /**
  610. * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
  611. * @wq: the waitqueue to wait on
  612. * @condition: a C expression for the event to wait for
  613. *
  614. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  615. * @condition evaluates to true or a signal is received.
  616. * The @condition is checked each time the waitqueue @wq is woken up.
  617. *
  618. * It must be called with wq.lock being held. This spinlock is
  619. * unlocked while sleeping but @condition testing is done while lock
  620. * is held and when this macro exits the lock is held.
  621. *
  622. * The lock is locked/unlocked using spin_lock()/spin_unlock()
  623. * functions which must match the way they are locked/unlocked outside
  624. * of this macro.
  625. *
  626. * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
  627. * set thus when other process waits process on the list if this
  628. * process is awaken further processes are not considered.
  629. *
  630. * wake_up_locked() has to be called after changing any variable that could
  631. * change the result of the wait condition.
  632. *
  633. * The function will return -ERESTARTSYS if it was interrupted by a
  634. * signal and 0 if @condition evaluated to true.
  635. */
  636. #define wait_event_interruptible_exclusive_locked(wq, condition) \
  637. ((condition) \
  638. ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
  639. /**
  640. * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
  641. * @wq: the waitqueue to wait on
  642. * @condition: a C expression for the event to wait for
  643. *
  644. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  645. * @condition evaluates to true or a signal is received.
  646. * The @condition is checked each time the waitqueue @wq is woken up.
  647. *
  648. * It must be called with wq.lock being held. This spinlock is
  649. * unlocked while sleeping but @condition testing is done while lock
  650. * is held and when this macro exits the lock is held.
  651. *
  652. * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
  653. * functions which must match the way they are locked/unlocked outside
  654. * of this macro.
  655. *
  656. * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
  657. * set thus when other process waits process on the list if this
  658. * process is awaken further processes are not considered.
  659. *
  660. * wake_up_locked() has to be called after changing any variable that could
  661. * change the result of the wait condition.
  662. *
  663. * The function will return -ERESTARTSYS if it was interrupted by a
  664. * signal and 0 if @condition evaluated to true.
  665. */
  666. #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
  667. ((condition) \
  668. ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
  669. #define __wait_event_killable(wq, condition) \
  670. ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
  671. /**
  672. * wait_event_killable - sleep until a condition gets true
  673. * @wq_head: the waitqueue to wait on
  674. * @condition: a C expression for the event to wait for
  675. *
  676. * The process is put to sleep (TASK_KILLABLE) until the
  677. * @condition evaluates to true or a signal is received.
  678. * The @condition is checked each time the waitqueue @wq_head is woken up.
  679. *
  680. * wake_up() has to be called after changing any variable that could
  681. * change the result of the wait condition.
  682. *
  683. * The function will return -ERESTARTSYS if it was interrupted by a
  684. * signal and 0 if @condition evaluated to true.
  685. */
  686. #define wait_event_killable(wq_head, condition) \
  687. ({ \
  688. int __ret = 0; \
  689. might_sleep(); \
  690. if (!(condition)) \
  691. __ret = __wait_event_killable(wq_head, condition); \
  692. __ret; \
  693. })
  694. #define __wait_event_killable_timeout(wq_head, condition, timeout) \
  695. ___wait_event(wq_head, ___wait_cond_timeout(condition), \
  696. TASK_KILLABLE, 0, timeout, \
  697. __ret = schedule_timeout(__ret))
  698. /**
  699. * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
  700. * @wq_head: the waitqueue to wait on
  701. * @condition: a C expression for the event to wait for
  702. * @timeout: timeout, in jiffies
  703. *
  704. * The process is put to sleep (TASK_KILLABLE) until the
  705. * @condition evaluates to true or a kill signal is received.
  706. * The @condition is checked each time the waitqueue @wq_head is woken up.
  707. *
  708. * wake_up() has to be called after changing any variable that could
  709. * change the result of the wait condition.
  710. *
  711. * Returns:
  712. * 0 if the @condition evaluated to %false after the @timeout elapsed,
  713. * 1 if the @condition evaluated to %true after the @timeout elapsed,
  714. * the remaining jiffies (at least 1) if the @condition evaluated
  715. * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
  716. * interrupted by a kill signal.
  717. *
  718. * Only kill signals interrupt this process.
  719. */
  720. #define wait_event_killable_timeout(wq_head, condition, timeout) \
  721. ({ \
  722. long __ret = timeout; \
  723. might_sleep(); \
  724. if (!___wait_cond_timeout(condition)) \
  725. __ret = __wait_event_killable_timeout(wq_head, \
  726. condition, timeout); \
  727. __ret; \
  728. })
  729. #define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
  730. (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  731. spin_unlock_irq(&lock); \
  732. cmd; \
  733. schedule(); \
  734. spin_lock_irq(&lock))
  735. /**
  736. * wait_event_lock_irq_cmd - sleep until a condition gets true. The
  737. * condition is checked under the lock. This
  738. * is expected to be called with the lock
  739. * taken.
  740. * @wq_head: the waitqueue to wait on
  741. * @condition: a C expression for the event to wait for
  742. * @lock: a locked spinlock_t, which will be released before cmd
  743. * and schedule() and reacquired afterwards.
  744. * @cmd: a command which is invoked outside the critical section before
  745. * sleep
  746. *
  747. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  748. * @condition evaluates to true. The @condition is checked each time
  749. * the waitqueue @wq_head is woken up.
  750. *
  751. * wake_up() has to be called after changing any variable that could
  752. * change the result of the wait condition.
  753. *
  754. * This is supposed to be called while holding the lock. The lock is
  755. * dropped before invoking the cmd and going to sleep and is reacquired
  756. * afterwards.
  757. */
  758. #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \
  759. do { \
  760. if (condition) \
  761. break; \
  762. __wait_event_lock_irq(wq_head, condition, lock, cmd); \
  763. } while (0)
  764. /**
  765. * wait_event_lock_irq - sleep until a condition gets true. The
  766. * condition is checked under the lock. This
  767. * is expected to be called with the lock
  768. * taken.
  769. * @wq_head: the waitqueue to wait on
  770. * @condition: a C expression for the event to wait for
  771. * @lock: a locked spinlock_t, which will be released before schedule()
  772. * and reacquired afterwards.
  773. *
  774. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  775. * @condition evaluates to true. The @condition is checked each time
  776. * the waitqueue @wq_head is woken up.
  777. *
  778. * wake_up() has to be called after changing any variable that could
  779. * change the result of the wait condition.
  780. *
  781. * This is supposed to be called while holding the lock. The lock is
  782. * dropped before going to sleep and is reacquired afterwards.
  783. */
  784. #define wait_event_lock_irq(wq_head, condition, lock) \
  785. do { \
  786. if (condition) \
  787. break; \
  788. __wait_event_lock_irq(wq_head, condition, lock, ); \
  789. } while (0)
  790. #define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
  791. ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
  792. spin_unlock_irq(&lock); \
  793. cmd; \
  794. schedule(); \
  795. spin_lock_irq(&lock))
  796. /**
  797. * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
  798. * The condition is checked under the lock. This is expected to
  799. * be called with the lock taken.
  800. * @wq_head: the waitqueue to wait on
  801. * @condition: a C expression for the event to wait for
  802. * @lock: a locked spinlock_t, which will be released before cmd and
  803. * schedule() and reacquired afterwards.
  804. * @cmd: a command which is invoked outside the critical section before
  805. * sleep
  806. *
  807. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  808. * @condition evaluates to true or a signal is received. The @condition is
  809. * checked each time the waitqueue @wq_head is woken up.
  810. *
  811. * wake_up() has to be called after changing any variable that could
  812. * change the result of the wait condition.
  813. *
  814. * This is supposed to be called while holding the lock. The lock is
  815. * dropped before invoking the cmd and going to sleep and is reacquired
  816. * afterwards.
  817. *
  818. * The macro will return -ERESTARTSYS if it was interrupted by a signal
  819. * and 0 if @condition evaluated to true.
  820. */
  821. #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
  822. ({ \
  823. int __ret = 0; \
  824. if (!(condition)) \
  825. __ret = __wait_event_interruptible_lock_irq(wq_head, \
  826. condition, lock, cmd); \
  827. __ret; \
  828. })
  829. /**
  830. * wait_event_interruptible_lock_irq - sleep until a condition gets true.
  831. * The condition is checked under the lock. This is expected
  832. * to be called with the lock taken.
  833. * @wq_head: the waitqueue to wait on
  834. * @condition: a C expression for the event to wait for
  835. * @lock: a locked spinlock_t, which will be released before schedule()
  836. * and reacquired afterwards.
  837. *
  838. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  839. * @condition evaluates to true or signal is received. The @condition is
  840. * checked each time the waitqueue @wq_head is woken up.
  841. *
  842. * wake_up() has to be called after changing any variable that could
  843. * change the result of the wait condition.
  844. *
  845. * This is supposed to be called while holding the lock. The lock is
  846. * dropped before going to sleep and is reacquired afterwards.
  847. *
  848. * The macro will return -ERESTARTSYS if it was interrupted by a signal
  849. * and 0 if @condition evaluated to true.
  850. */
  851. #define wait_event_interruptible_lock_irq(wq_head, condition, lock) \
  852. ({ \
  853. int __ret = 0; \
  854. if (!(condition)) \
  855. __ret = __wait_event_interruptible_lock_irq(wq_head, \
  856. condition, lock,); \
  857. __ret; \
  858. })
  859. #define __wait_event_interruptible_lock_irq_timeout(wq_head, condition, \
  860. lock, timeout) \
  861. ___wait_event(wq_head, ___wait_cond_timeout(condition), \
  862. TASK_INTERRUPTIBLE, 0, timeout, \
  863. spin_unlock_irq(&lock); \
  864. __ret = schedule_timeout(__ret); \
  865. spin_lock_irq(&lock));
  866. /**
  867. * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
  868. * true or a timeout elapses. The condition is checked under
  869. * the lock. This is expected to be called with the lock taken.
  870. * @wq_head: the waitqueue to wait on
  871. * @condition: a C expression for the event to wait for
  872. * @lock: a locked spinlock_t, which will be released before schedule()
  873. * and reacquired afterwards.
  874. * @timeout: timeout, in jiffies
  875. *
  876. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  877. * @condition evaluates to true or signal is received. The @condition is
  878. * checked each time the waitqueue @wq_head is woken up.
  879. *
  880. * wake_up() has to be called after changing any variable that could
  881. * change the result of the wait condition.
  882. *
  883. * This is supposed to be called while holding the lock. The lock is
  884. * dropped before going to sleep and is reacquired afterwards.
  885. *
  886. * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
  887. * was interrupted by a signal, and the remaining jiffies otherwise
  888. * if the condition evaluated to true before the timeout elapsed.
  889. */
  890. #define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
  891. timeout) \
  892. ({ \
  893. long __ret = timeout; \
  894. if (!___wait_cond_timeout(condition)) \
  895. __ret = __wait_event_interruptible_lock_irq_timeout( \
  896. wq_head, condition, lock, timeout); \
  897. __ret; \
  898. })
  899. /*
  900. * Waitqueues which are removed from the waitqueue_head at wakeup time
  901. */
  902. void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
  903. void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
  904. long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
  905. void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
  906. long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
  907. int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
  908. int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
  909. #define DEFINE_WAIT_FUNC(name, function) \
  910. struct wait_queue_entry name = { \
  911. .private = current, \
  912. .func = function, \
  913. .entry = LIST_HEAD_INIT((name).entry), \
  914. }
  915. #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
  916. #define init_wait(wait) \
  917. do { \
  918. (wait)->private = current; \
  919. (wait)->func = autoremove_wake_function; \
  920. INIT_LIST_HEAD(&(wait)->entry); \
  921. (wait)->flags = 0; \
  922. } while (0)
#endif /* _LINUX_WAIT_H */