wait.h 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008
  1. #ifndef _LINUX_WAIT_H
  2. #define _LINUX_WAIT_H
  3. /*
  4. * Linux wait queue related types and methods
  5. */
  6. #include <linux/list.h>
  7. #include <linux/stddef.h>
  8. #include <linux/spinlock.h>
  9. #include <asm/current.h>
  10. #include <uapi/linux/wait.h>
  11. typedef struct wait_queue_entry wait_queue_entry_t;
  12. typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
  13. int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
  14. /* wait_queue_entry::flags */
  15. #define WQ_FLAG_EXCLUSIVE 0x01
  16. #define WQ_FLAG_WOKEN 0x02
  17. #define WQ_FLAG_BOOKMARK 0x04
  18. /*
  19. * A single wait-queue entry structure:
  20. */
  21. struct wait_queue_entry {
  22. unsigned int flags;
  23. void *private;
  24. wait_queue_func_t func;
  25. struct list_head entry;
  26. };
  27. struct wait_queue_head {
  28. spinlock_t lock;
  29. struct list_head head;
  30. };
  31. typedef struct wait_queue_head wait_queue_head_t;
  32. struct task_struct;
  33. /*
  34. * Macros for declaration and initialisaton of the datatypes
  35. */
  36. #define __WAITQUEUE_INITIALIZER(name, tsk) { \
  37. .private = tsk, \
  38. .func = default_wake_function, \
  39. .entry = { NULL, NULL } }
  40. #define DECLARE_WAITQUEUE(name, tsk) \
  41. struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
  42. #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
  43. .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
  44. .head = { &(name).head, &(name).head } }
  45. #define DECLARE_WAIT_QUEUE_HEAD(name) \
  46. struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
  47. extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
  48. #define init_waitqueue_head(wq_head) \
  49. do { \
  50. static struct lock_class_key __key; \
  51. \
  52. __init_waitqueue_head((wq_head), #wq_head, &__key); \
  53. } while (0)
  54. #ifdef CONFIG_LOCKDEP
  55. # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
  56. ({ init_waitqueue_head(&name); name; })
  57. # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
  58. struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
  59. #else
  60. # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
  61. #endif
  62. static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
  63. {
  64. wq_entry->flags = 0;
  65. wq_entry->private = p;
  66. wq_entry->func = default_wake_function;
  67. }
  68. static inline void
  69. init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
  70. {
  71. wq_entry->flags = 0;
  72. wq_entry->private = NULL;
  73. wq_entry->func = func;
  74. }
  75. /**
  76. * waitqueue_active -- locklessly test for waiters on the queue
  77. * @wq_head: the waitqueue to test for waiters
  78. *
  79. * returns true if the wait list is not empty
  80. *
  81. * NOTE: this function is lockless and requires care, incorrect usage _will_
  82. * lead to sporadic and non-obvious failure.
  83. *
  84. * Use either while holding wait_queue_head::lock or when used for wakeups
  85. * with an extra smp_mb() like:
  86. *
  87. * CPU0 - waker CPU1 - waiter
  88. *
  89. * for (;;) {
  90. * @cond = true; prepare_to_wait(&wq_head, &wait, state);
  91. * smp_mb(); // smp_mb() from set_current_state()
  92. * if (waitqueue_active(wq_head)) if (@cond)
  93. * wake_up(wq_head); break;
  94. * schedule();
  95. * }
  96. * finish_wait(&wq_head, &wait);
  97. *
  98. * Because without the explicit smp_mb() it's possible for the
  99. * waitqueue_active() load to get hoisted over the @cond store such that we'll
  100. * observe an empty wait list while the waiter might not observe @cond.
  101. *
  102. * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
  103. * which (when the lock is uncontended) are of roughly equal cost.
  104. */
  105. static inline int waitqueue_active(struct wait_queue_head *wq_head)
  106. {
  107. return !list_empty(&wq_head->head);
  108. }
  109. /**
  110. * wq_has_sleeper - check if there are any waiting processes
  111. * @wq_head: wait queue head
  112. *
  113. * Returns true if wq_head has waiting processes
  114. *
  115. * Please refer to the comment for waitqueue_active.
  116. */
  117. static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
  118. {
  119. /*
  120. * We need to be sure we are in sync with the
  121. * add_wait_queue modifications to the wait queue.
  122. *
  123. * This memory barrier should be paired with one on the
  124. * waiting side.
  125. */
  126. smp_mb();
  127. return waitqueue_active(wq_head);
  128. }
  129. extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
  130. extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
  131. extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
  132. static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  133. {
  134. list_add(&wq_entry->entry, &wq_head->head);
  135. }
  136. /*
  137. * Used for wake-one threads:
  138. */
  139. static inline void
  140. __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  141. {
  142. wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
  143. __add_wait_queue(wq_head, wq_entry);
  144. }
  145. static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  146. {
  147. list_add_tail(&wq_entry->entry, &wq_head->head);
  148. }
  149. static inline void
  150. __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  151. {
  152. wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
  153. __add_wait_queue_entry_tail(wq_head, wq_entry);
  154. }
  155. static inline void
  156. __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  157. {
  158. list_del(&wq_entry->entry);
  159. }
  160. void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
  161. void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
  162. void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
  163. unsigned int mode, void *key, wait_queue_entry_t *bookmark);
  164. void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
  165. void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
  166. void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
  167. #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
  168. #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
  169. #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
  170. #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
  171. #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
  172. #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
  173. #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
  174. #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
  175. #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
  176. /*
  177. * Wakeup macros to be used to report events to the targets.
  178. */
  179. #define wake_up_poll(x, m) \
  180. __wake_up(x, TASK_NORMAL, 1, (void *) (m))
  181. #define wake_up_locked_poll(x, m) \
  182. __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
  183. #define wake_up_interruptible_poll(x, m) \
  184. __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
  185. #define wake_up_interruptible_sync_poll(x, m) \
  186. __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
  187. #define ___wait_cond_timeout(condition) \
  188. ({ \
  189. bool __cond = (condition); \
  190. if (__cond && !__ret) \
  191. __ret = 1; \
  192. __cond || !__ret; \
  193. })
  194. #define ___wait_is_interruptible(state) \
  195. (!__builtin_constant_p(state) || \
  196. state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
  197. extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
  198. /*
  199. * The below macro ___wait_event() has an explicit shadow of the __ret
  200. * variable when used from the wait_event_*() macros.
  201. *
  202. * This is so that both can use the ___wait_cond_timeout() construct
  203. * to wrap the condition.
  204. *
  205. * The type inconsistency of the wait_event_*() __ret variable is also
  206. * on purpose; we use long where we can return timeout values and int
  207. * otherwise.
  208. */
  209. #define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
  210. ({ \
  211. __label__ __out; \
  212. struct wait_queue_entry __wq_entry; \
  213. long __ret = ret; /* explicit shadow */ \
  214. \
  215. init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
  216. for (;;) { \
  217. long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
  218. \
  219. if (condition) \
  220. break; \
  221. \
  222. if (___wait_is_interruptible(state) && __int) { \
  223. __ret = __int; \
  224. goto __out; \
  225. } \
  226. \
  227. cmd; \
  228. } \
  229. finish_wait(&wq_head, &__wq_entry); \
  230. __out: __ret; \
  231. })
  232. #define __wait_event(wq_head, condition) \
  233. (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  234. schedule())
  235. /**
  236. * wait_event - sleep until a condition gets true
  237. * @wq_head: the waitqueue to wait on
  238. * @condition: a C expression for the event to wait for
  239. *
  240. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  241. * @condition evaluates to true. The @condition is checked each time
  242. * the waitqueue @wq_head is woken up.
  243. *
  244. * wake_up() has to be called after changing any variable that could
  245. * change the result of the wait condition.
  246. */
  247. #define wait_event(wq_head, condition) \
  248. do { \
  249. might_sleep(); \
  250. if (condition) \
  251. break; \
  252. __wait_event(wq_head, condition); \
  253. } while (0)
  254. #define __io_wait_event(wq_head, condition) \
  255. (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  256. io_schedule())
  257. /*
  258. * io_wait_event() -- like wait_event() but with io_schedule()
  259. */
  260. #define io_wait_event(wq_head, condition) \
  261. do { \
  262. might_sleep(); \
  263. if (condition) \
  264. break; \
  265. __io_wait_event(wq_head, condition); \
  266. } while (0)
  267. #define __wait_event_freezable(wq_head, condition) \
  268. ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
  269. schedule(); try_to_freeze())
  270. /**
  271. * wait_event_freezable - sleep (or freeze) until a condition gets true
  272. * @wq_head: the waitqueue to wait on
  273. * @condition: a C expression for the event to wait for
  274. *
  275. * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
  276. * to system load) until the @condition evaluates to true. The
  277. * @condition is checked each time the waitqueue @wq_head is woken up.
  278. *
  279. * wake_up() has to be called after changing any variable that could
  280. * change the result of the wait condition.
  281. */
  282. #define wait_event_freezable(wq_head, condition) \
  283. ({ \
  284. int __ret = 0; \
  285. might_sleep(); \
  286. if (!(condition)) \
  287. __ret = __wait_event_freezable(wq_head, condition); \
  288. __ret; \
  289. })
  290. #define __wait_event_timeout(wq_head, condition, timeout) \
  291. ___wait_event(wq_head, ___wait_cond_timeout(condition), \
  292. TASK_UNINTERRUPTIBLE, 0, timeout, \
  293. __ret = schedule_timeout(__ret))
  294. /**
  295. * wait_event_timeout - sleep until a condition gets true or a timeout elapses
  296. * @wq_head: the waitqueue to wait on
  297. * @condition: a C expression for the event to wait for
  298. * @timeout: timeout, in jiffies
  299. *
  300. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  301. * @condition evaluates to true. The @condition is checked each time
  302. * the waitqueue @wq_head is woken up.
  303. *
  304. * wake_up() has to be called after changing any variable that could
  305. * change the result of the wait condition.
  306. *
  307. * Returns:
  308. * 0 if the @condition evaluated to %false after the @timeout elapsed,
  309. * 1 if the @condition evaluated to %true after the @timeout elapsed,
  310. * or the remaining jiffies (at least 1) if the @condition evaluated
  311. * to %true before the @timeout elapsed.
  312. */
  313. #define wait_event_timeout(wq_head, condition, timeout) \
  314. ({ \
  315. long __ret = timeout; \
  316. might_sleep(); \
  317. if (!___wait_cond_timeout(condition)) \
  318. __ret = __wait_event_timeout(wq_head, condition, timeout); \
  319. __ret; \
  320. })
  321. #define __wait_event_freezable_timeout(wq_head, condition, timeout) \
  322. ___wait_event(wq_head, ___wait_cond_timeout(condition), \
  323. TASK_INTERRUPTIBLE, 0, timeout, \
  324. __ret = schedule_timeout(__ret); try_to_freeze())
  325. /*
  326. * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
  327. * increasing load and is freezable.
  328. */
  329. #define wait_event_freezable_timeout(wq_head, condition, timeout) \
  330. ({ \
  331. long __ret = timeout; \
  332. might_sleep(); \
  333. if (!___wait_cond_timeout(condition)) \
  334. __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
  335. __ret; \
  336. })
  337. #define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
  338. (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
  339. cmd1; schedule(); cmd2)
  340. /*
  341. * Just like wait_event_cmd(), except it sets exclusive flag
  342. */
  343. #define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
  344. do { \
  345. if (condition) \
  346. break; \
  347. __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \
  348. } while (0)
  349. #define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \
  350. (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  351. cmd1; schedule(); cmd2)
  352. /**
  353. * wait_event_cmd - sleep until a condition gets true
  354. * @wq_head: the waitqueue to wait on
  355. * @condition: a C expression for the event to wait for
  356. * @cmd1: the command will be executed before sleep
  357. * @cmd2: the command will be executed after sleep
  358. *
  359. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  360. * @condition evaluates to true. The @condition is checked each time
  361. * the waitqueue @wq_head is woken up.
  362. *
  363. * wake_up() has to be called after changing any variable that could
  364. * change the result of the wait condition.
  365. */
  366. #define wait_event_cmd(wq_head, condition, cmd1, cmd2) \
  367. do { \
  368. if (condition) \
  369. break; \
  370. __wait_event_cmd(wq_head, condition, cmd1, cmd2); \
  371. } while (0)
  372. #define __wait_event_interruptible(wq_head, condition) \
  373. ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
  374. schedule())
  375. /**
  376. * wait_event_interruptible - sleep until a condition gets true
  377. * @wq_head: the waitqueue to wait on
  378. * @condition: a C expression for the event to wait for
  379. *
  380. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  381. * @condition evaluates to true or a signal is received.
  382. * The @condition is checked each time the waitqueue @wq_head is woken up.
  383. *
  384. * wake_up() has to be called after changing any variable that could
  385. * change the result of the wait condition.
  386. *
  387. * The function will return -ERESTARTSYS if it was interrupted by a
  388. * signal and 0 if @condition evaluated to true.
  389. */
  390. #define wait_event_interruptible(wq_head, condition) \
  391. ({ \
  392. int __ret = 0; \
  393. might_sleep(); \
  394. if (!(condition)) \
  395. __ret = __wait_event_interruptible(wq_head, condition); \
  396. __ret; \
  397. })
  398. #define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
  399. ___wait_event(wq_head, ___wait_cond_timeout(condition), \
  400. TASK_INTERRUPTIBLE, 0, timeout, \
  401. __ret = schedule_timeout(__ret))
  402. /**
  403. * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
  404. * @wq_head: the waitqueue to wait on
  405. * @condition: a C expression for the event to wait for
  406. * @timeout: timeout, in jiffies
  407. *
  408. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  409. * @condition evaluates to true or a signal is received.
  410. * The @condition is checked each time the waitqueue @wq_head is woken up.
  411. *
  412. * wake_up() has to be called after changing any variable that could
  413. * change the result of the wait condition.
  414. *
  415. * Returns:
  416. * 0 if the @condition evaluated to %false after the @timeout elapsed,
  417. * 1 if the @condition evaluated to %true after the @timeout elapsed,
  418. * the remaining jiffies (at least 1) if the @condition evaluated
  419. * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
  420. * interrupted by a signal.
  421. */
  422. #define wait_event_interruptible_timeout(wq_head, condition, timeout) \
  423. ({ \
  424. long __ret = timeout; \
  425. might_sleep(); \
  426. if (!___wait_cond_timeout(condition)) \
  427. __ret = __wait_event_interruptible_timeout(wq_head, \
  428. condition, timeout); \
  429. __ret; \
  430. })
  431. #define __wait_event_hrtimeout(wq_head, condition, timeout, state) \
  432. ({ \
  433. int __ret = 0; \
  434. struct hrtimer_sleeper __t; \
  435. \
  436. hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); \
  437. hrtimer_init_sleeper(&__t, current); \
  438. if ((timeout) != KTIME_MAX) \
  439. hrtimer_start_range_ns(&__t.timer, timeout, \
  440. current->timer_slack_ns, \
  441. HRTIMER_MODE_REL); \
  442. \
  443. __ret = ___wait_event(wq_head, condition, state, 0, 0, \
  444. if (!__t.task) { \
  445. __ret = -ETIME; \
  446. break; \
  447. } \
  448. schedule()); \
  449. \
  450. hrtimer_cancel(&__t.timer); \
  451. destroy_hrtimer_on_stack(&__t.timer); \
  452. __ret; \
  453. })
  454. /**
  455. * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
  456. * @wq_head: the waitqueue to wait on
  457. * @condition: a C expression for the event to wait for
  458. * @timeout: timeout, as a ktime_t
  459. *
  460. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  461. * @condition evaluates to true or a signal is received.
  462. * The @condition is checked each time the waitqueue @wq_head is woken up.
  463. *
  464. * wake_up() has to be called after changing any variable that could
  465. * change the result of the wait condition.
  466. *
  467. * The function returns 0 if @condition became true, or -ETIME if the timeout
  468. * elapsed.
  469. */
  470. #define wait_event_hrtimeout(wq_head, condition, timeout) \
  471. ({ \
  472. int __ret = 0; \
  473. might_sleep(); \
  474. if (!(condition)) \
  475. __ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
  476. TASK_UNINTERRUPTIBLE); \
  477. __ret; \
  478. })
  479. /**
  480. * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
  481. * @wq: the waitqueue to wait on
  482. * @condition: a C expression for the event to wait for
  483. * @timeout: timeout, as a ktime_t
  484. *
  485. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  486. * @condition evaluates to true or a signal is received.
  487. * The @condition is checked each time the waitqueue @wq is woken up.
  488. *
  489. * wake_up() has to be called after changing any variable that could
  490. * change the result of the wait condition.
  491. *
  492. * The function returns 0 if @condition became true, -ERESTARTSYS if it was
  493. * interrupted by a signal, or -ETIME if the timeout elapsed.
  494. */
  495. #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
  496. ({ \
  497. long __ret = 0; \
  498. might_sleep(); \
  499. if (!(condition)) \
  500. __ret = __wait_event_hrtimeout(wq, condition, timeout, \
  501. TASK_INTERRUPTIBLE); \
  502. __ret; \
  503. })
  504. #define __wait_event_interruptible_exclusive(wq, condition) \
  505. ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
  506. schedule())
  507. #define wait_event_interruptible_exclusive(wq, condition) \
  508. ({ \
  509. int __ret = 0; \
  510. might_sleep(); \
  511. if (!(condition)) \
  512. __ret = __wait_event_interruptible_exclusive(wq, condition); \
  513. __ret; \
  514. })
  515. #define __wait_event_killable_exclusive(wq, condition) \
  516. ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
  517. schedule())
  518. #define wait_event_killable_exclusive(wq, condition) \
  519. ({ \
  520. int __ret = 0; \
  521. might_sleep(); \
  522. if (!(condition)) \
  523. __ret = __wait_event_killable_exclusive(wq, condition); \
  524. __ret; \
  525. })
  526. #define __wait_event_freezable_exclusive(wq, condition) \
  527. ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
  528. schedule(); try_to_freeze())
  529. #define wait_event_freezable_exclusive(wq, condition) \
  530. ({ \
  531. int __ret = 0; \
  532. might_sleep(); \
  533. if (!(condition)) \
  534. __ret = __wait_event_freezable_exclusive(wq, condition); \
  535. __ret; \
  536. })
  537. extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
  538. extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
  539. #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
  540. ({ \
  541. int __ret; \
  542. DEFINE_WAIT(__wait); \
  543. if (exclusive) \
  544. __wait.flags |= WQ_FLAG_EXCLUSIVE; \
  545. do { \
  546. __ret = fn(&(wq), &__wait); \
  547. if (__ret) \
  548. break; \
  549. } while (!(condition)); \
  550. __remove_wait_queue(&(wq), &__wait); \
  551. __set_current_state(TASK_RUNNING); \
  552. __ret; \
  553. })
  554. /**
  555. * wait_event_interruptible_locked - sleep until a condition gets true
  556. * @wq: the waitqueue to wait on
  557. * @condition: a C expression for the event to wait for
  558. *
  559. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  560. * @condition evaluates to true or a signal is received.
  561. * The @condition is checked each time the waitqueue @wq is woken up.
  562. *
  563. * It must be called with wq.lock being held. This spinlock is
  564. * unlocked while sleeping but @condition testing is done while lock
  565. * is held and when this macro exits the lock is held.
  566. *
  567. * The lock is locked/unlocked using spin_lock()/spin_unlock()
  568. * functions which must match the way they are locked/unlocked outside
  569. * of this macro.
  570. *
  571. * wake_up_locked() has to be called after changing any variable that could
  572. * change the result of the wait condition.
  573. *
  574. * The function will return -ERESTARTSYS if it was interrupted by a
  575. * signal and 0 if @condition evaluated to true.
  576. */
  577. #define wait_event_interruptible_locked(wq, condition) \
  578. ((condition) \
  579. ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
  580. /**
  581. * wait_event_interruptible_locked_irq - sleep until a condition gets true
  582. * @wq: the waitqueue to wait on
  583. * @condition: a C expression for the event to wait for
  584. *
  585. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  586. * @condition evaluates to true or a signal is received.
  587. * The @condition is checked each time the waitqueue @wq is woken up.
  588. *
  589. * It must be called with wq.lock being held. This spinlock is
  590. * unlocked while sleeping but @condition testing is done while lock
  591. * is held and when this macro exits the lock is held.
  592. *
  593. * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
  594. * functions which must match the way they are locked/unlocked outside
  595. * of this macro.
  596. *
  597. * wake_up_locked() has to be called after changing any variable that could
  598. * change the result of the wait condition.
  599. *
  600. * The function will return -ERESTARTSYS if it was interrupted by a
  601. * signal and 0 if @condition evaluated to true.
  602. */
  603. #define wait_event_interruptible_locked_irq(wq, condition) \
  604. ((condition) \
  605. ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
  606. /**
  607. * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
  608. * @wq: the waitqueue to wait on
  609. * @condition: a C expression for the event to wait for
  610. *
  611. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  612. * @condition evaluates to true or a signal is received.
  613. * The @condition is checked each time the waitqueue @wq is woken up.
  614. *
  615. * It must be called with wq.lock being held. This spinlock is
  616. * unlocked while sleeping but @condition testing is done while lock
  617. * is held and when this macro exits the lock is held.
  618. *
  619. * The lock is locked/unlocked using spin_lock()/spin_unlock()
  620. * functions which must match the way they are locked/unlocked outside
  621. * of this macro.
  622. *
  623. * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
  624. * set thus when other process waits process on the list if this
  625. * process is awaken further processes are not considered.
  626. *
  627. * wake_up_locked() has to be called after changing any variable that could
  628. * change the result of the wait condition.
  629. *
  630. * The function will return -ERESTARTSYS if it was interrupted by a
  631. * signal and 0 if @condition evaluated to true.
  632. */
  633. #define wait_event_interruptible_exclusive_locked(wq, condition) \
  634. ((condition) \
  635. ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
  636. /**
  637. * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
  638. * @wq: the waitqueue to wait on
  639. * @condition: a C expression for the event to wait for
  640. *
  641. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  642. * @condition evaluates to true or a signal is received.
  643. * The @condition is checked each time the waitqueue @wq is woken up.
  644. *
  645. * It must be called with wq.lock being held. This spinlock is
  646. * unlocked while sleeping but @condition testing is done while lock
  647. * is held and when this macro exits the lock is held.
  648. *
  649. * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
  650. * functions which must match the way they are locked/unlocked outside
  651. * of this macro.
  652. *
  653. * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
  654. * set thus when other process waits process on the list if this
  655. * process is awaken further processes are not considered.
  656. *
  657. * wake_up_locked() has to be called after changing any variable that could
  658. * change the result of the wait condition.
  659. *
  660. * The function will return -ERESTARTSYS if it was interrupted by a
  661. * signal and 0 if @condition evaluated to true.
  662. */
  663. #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
  664. ((condition) \
  665. ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
  666. #define __wait_event_killable(wq, condition) \
  667. ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
  668. /**
  669. * wait_event_killable - sleep until a condition gets true
  670. * @wq_head: the waitqueue to wait on
  671. * @condition: a C expression for the event to wait for
  672. *
  673. * The process is put to sleep (TASK_KILLABLE) until the
  674. * @condition evaluates to true or a signal is received.
  675. * The @condition is checked each time the waitqueue @wq_head is woken up.
  676. *
  677. * wake_up() has to be called after changing any variable that could
  678. * change the result of the wait condition.
  679. *
  680. * The function will return -ERESTARTSYS if it was interrupted by a
  681. * signal and 0 if @condition evaluated to true.
  682. */
  683. #define wait_event_killable(wq_head, condition) \
  684. ({ \
  685. int __ret = 0; \
  686. might_sleep(); \
  687. if (!(condition)) \
  688. __ret = __wait_event_killable(wq_head, condition); \
  689. __ret; \
  690. })
/*
 * Internal helper for wait_event_killable_timeout(): TASK_KILLABLE sleep
 * bounded by schedule_timeout(); __ret carries the remaining jiffies
 * across iterations.
 */
#define __wait_event_killable_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_KILLABLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))
  695. /**
  696. * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
  697. * @wq_head: the waitqueue to wait on
  698. * @condition: a C expression for the event to wait for
  699. * @timeout: timeout, in jiffies
  700. *
  701. * The process is put to sleep (TASK_KILLABLE) until the
  702. * @condition evaluates to true or a kill signal is received.
  703. * The @condition is checked each time the waitqueue @wq_head is woken up.
  704. *
  705. * wake_up() has to be called after changing any variable that could
  706. * change the result of the wait condition.
  707. *
  708. * Returns:
  709. * 0 if the @condition evaluated to %false after the @timeout elapsed,
  710. * 1 if the @condition evaluated to %true after the @timeout elapsed,
  711. * the remaining jiffies (at least 1) if the @condition evaluated
  712. * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
  713. * interrupted by a kill signal.
  714. *
  715. * Only kill signals interrupt this process.
  716. */
  717. #define wait_event_killable_timeout(wq_head, condition, timeout) \
  718. ({ \
  719. long __ret = timeout; \
  720. might_sleep(); \
  721. if (!___wait_cond_timeout(condition)) \
  722. __ret = __wait_event_killable_timeout(wq_head, \
  723. condition, timeout); \
  724. __ret; \
  725. })
/*
 * Internal helper for wait_event_lock_irq*(): uninterruptible sleep with
 * @lock dropped (and @cmd run) before each schedule() and reacquired
 * afterwards, so @condition is always evaluated with @lock held.
 */
#define __wait_event_lock_irq(wq_head, condition, lock, cmd)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))
  732. /**
  733. * wait_event_lock_irq_cmd - sleep until a condition gets true. The
  734. * condition is checked under the lock. This
  735. * is expected to be called with the lock
  736. * taken.
  737. * @wq_head: the waitqueue to wait on
  738. * @condition: a C expression for the event to wait for
  739. * @lock: a locked spinlock_t, which will be released before cmd
  740. * and schedule() and reacquired afterwards.
  741. * @cmd: a command which is invoked outside the critical section before
  742. * sleep
  743. *
  744. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  745. * @condition evaluates to true. The @condition is checked each time
  746. * the waitqueue @wq_head is woken up.
  747. *
  748. * wake_up() has to be called after changing any variable that could
  749. * change the result of the wait condition.
  750. *
  751. * This is supposed to be called while holding the lock. The lock is
  752. * dropped before invoking the cmd and going to sleep and is reacquired
  753. * afterwards.
  754. */
  755. #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \
  756. do { \
  757. if (condition) \
  758. break; \
  759. __wait_event_lock_irq(wq_head, condition, lock, cmd); \
  760. } while (0)
  761. /**
  762. * wait_event_lock_irq - sleep until a condition gets true. The
  763. * condition is checked under the lock. This
  764. * is expected to be called with the lock
  765. * taken.
  766. * @wq_head: the waitqueue to wait on
  767. * @condition: a C expression for the event to wait for
  768. * @lock: a locked spinlock_t, which will be released before schedule()
  769. * and reacquired afterwards.
  770. *
  771. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  772. * @condition evaluates to true. The @condition is checked each time
  773. * the waitqueue @wq_head is woken up.
  774. *
  775. * wake_up() has to be called after changing any variable that could
  776. * change the result of the wait condition.
  777. *
  778. * This is supposed to be called while holding the lock. The lock is
  779. * dropped before going to sleep and is reacquired afterwards.
  780. */
  781. #define wait_event_lock_irq(wq_head, condition, lock) \
  782. do { \
  783. if (condition) \
  784. break; \
  785. __wait_event_lock_irq(wq_head, condition, lock, ); \
  786. } while (0)
/*
 * Internal helper for wait_event_interruptible_lock_irq*(): like
 * __wait_event_lock_irq() but sleeps in TASK_INTERRUPTIBLE, so the wait
 * can be interrupted by a signal (returning -ERESTARTSYS).
 */
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,	\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))
  793. /**
  794. * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
  795. * The condition is checked under the lock. This is expected to
  796. * be called with the lock taken.
  797. * @wq_head: the waitqueue to wait on
  798. * @condition: a C expression for the event to wait for
  799. * @lock: a locked spinlock_t, which will be released before cmd and
  800. * schedule() and reacquired afterwards.
  801. * @cmd: a command which is invoked outside the critical section before
  802. * sleep
  803. *
  804. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  805. * @condition evaluates to true or a signal is received. The @condition is
  806. * checked each time the waitqueue @wq_head is woken up.
  807. *
  808. * wake_up() has to be called after changing any variable that could
  809. * change the result of the wait condition.
  810. *
  811. * This is supposed to be called while holding the lock. The lock is
  812. * dropped before invoking the cmd and going to sleep and is reacquired
  813. * afterwards.
  814. *
  815. * The macro will return -ERESTARTSYS if it was interrupted by a signal
  816. * and 0 if @condition evaluated to true.
  817. */
  818. #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
  819. ({ \
  820. int __ret = 0; \
  821. if (!(condition)) \
  822. __ret = __wait_event_interruptible_lock_irq(wq_head, \
  823. condition, lock, cmd); \
  824. __ret; \
  825. })
  826. /**
  827. * wait_event_interruptible_lock_irq - sleep until a condition gets true.
  828. * The condition is checked under the lock. This is expected
  829. * to be called with the lock taken.
  830. * @wq_head: the waitqueue to wait on
  831. * @condition: a C expression for the event to wait for
  832. * @lock: a locked spinlock_t, which will be released before schedule()
  833. * and reacquired afterwards.
  834. *
  835. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  836. * @condition evaluates to true or signal is received. The @condition is
  837. * checked each time the waitqueue @wq_head is woken up.
  838. *
  839. * wake_up() has to be called after changing any variable that could
  840. * change the result of the wait condition.
  841. *
  842. * This is supposed to be called while holding the lock. The lock is
  843. * dropped before going to sleep and is reacquired afterwards.
  844. *
  845. * The macro will return -ERESTARTSYS if it was interrupted by a signal
  846. * and 0 if @condition evaluated to true.
  847. */
  848. #define wait_event_interruptible_lock_irq(wq_head, condition, lock) \
  849. ({ \
  850. int __ret = 0; \
  851. if (!(condition)) \
  852. __ret = __wait_event_interruptible_lock_irq(wq_head, \
  853. condition, lock,); \
  854. __ret; \
  855. })
  856. #define __wait_event_interruptible_lock_irq_timeout(wq_head, condition, \
  857. lock, timeout) \
  858. ___wait_event(wq_head, ___wait_cond_timeout(condition), \
  859. TASK_INTERRUPTIBLE, 0, timeout, \
  860. spin_unlock_irq(&lock); \
  861. __ret = schedule_timeout(__ret); \
  862. spin_lock_irq(&lock));
  863. /**
  864. * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
  865. * true or a timeout elapses. The condition is checked under
  866. * the lock. This is expected to be called with the lock taken.
  867. * @wq_head: the waitqueue to wait on
  868. * @condition: a C expression for the event to wait for
  869. * @lock: a locked spinlock_t, which will be released before schedule()
  870. * and reacquired afterwards.
  871. * @timeout: timeout, in jiffies
  872. *
  873. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  874. * @condition evaluates to true or signal is received. The @condition is
  875. * checked each time the waitqueue @wq_head is woken up.
  876. *
  877. * wake_up() has to be called after changing any variable that could
  878. * change the result of the wait condition.
  879. *
  880. * This is supposed to be called while holding the lock. The lock is
  881. * dropped before going to sleep and is reacquired afterwards.
  882. *
  883. * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
  884. * was interrupted by a signal, and the remaining jiffies otherwise
  885. * if the condition evaluated to true before the timeout elapsed.
  886. */
  887. #define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
  888. timeout) \
  889. ({ \
  890. long __ret = timeout; \
  891. if (!___wait_cond_timeout(condition)) \
  892. __ret = __wait_event_interruptible_lock_irq_timeout( \
  893. wq_head, condition, lock, timeout); \
  894. __ret; \
  895. })
  896. /*
  897. * Waitqueues which are removed from the waitqueue_head at wakeup time
  898. */
  899. void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
  900. void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
  901. long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
  902. void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
  903. long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
  904. int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
  905. int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
/*
 * DEFINE_WAIT_FUNC - declare and initialize a wait_queue_entry named
 * @name for the current task, using @function as its wake function.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	struct wait_queue_entry name = {				\
		.private	= current,				\
		.func		= function,				\
		.entry		= LIST_HEAD_INIT((name).entry),		\
	}

/* Common case: a wait entry that removes itself from the queue on wakeup. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
/*
 * init_wait - (re)initialize an existing wait_queue_entry for the
 * current task: default autoremove wake function, empty list linkage,
 * cleared flags.
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->entry);				\
		(wait)->flags = 0;					\
	} while (0)
  920. #endif /* _LINUX_WAIT_H */