/*
 * include/linux/wait.h
 * (web-scrape pagination artifacts removed from this copy)
 */
  1. #ifndef _LINUX_WAIT_H
  2. #define _LINUX_WAIT_H
  3. /*
  4. * Linux wait queue related types and methods
  5. */
  6. #include <linux/list.h>
  7. #include <linux/stddef.h>
  8. #include <linux/spinlock.h>
  9. #include <asm/current.h>
  10. #include <uapi/linux/wait.h>
typedef struct wait_queue_entry wait_queue_entry_t;

/*
 * Per-entry wake callback, invoked when the queue is woken; the meaning of
 * its return value is defined by the wakeup loop (implemented elsewhere).
 */
typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* entry queued for wake-one semantics */
#define WQ_FLAG_WOKEN		0x02	/* entry has been woken */

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;		/* WQ_FLAG_* bits above */
	void			*private;	/* opaque; init_waitqueue_entry() stores the waiting task here */
	wait_queue_func_t	func;		/* wake callback for this entry */
	struct list_head	entry;		/* link in wait_queue_head::head */
};

struct wait_queue_head {
	spinlock_t		lock;		/* serializes modifications of ->head */
	struct list_head	head;		/* list of queued wait_queue_entry's */
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;
/*
 * Macros for declaration and initialisation of the datatypes
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private	= tsk, \
	.func		= default_wake_function, \
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock), \
	.head		= { &(name).head, &(name).head } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Runtime initializer: the static __key gives every init_waitqueue_head()
 * call site its own lockdep class.
 */
#define init_waitqueue_head(wq_head) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_waitqueue_head((wq_head), #wq_head, &__key); \
	} while (0)

#ifdef CONFIG_LOCKDEP
/* On-stack heads must be initialized at runtime so lockdep gets a key. */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
  61. static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
  62. {
  63. wq_entry->flags = 0;
  64. wq_entry->private = p;
  65. wq_entry->func = default_wake_function;
  66. }
  67. static inline void
  68. init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
  69. {
  70. wq_entry->flags = 0;
  71. wq_entry->private = NULL;
  72. wq_entry->func = func;
  73. }
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))    if (@cond)
 *        wake_up(wq_head);                 break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	return !list_empty(&wq_head->head);
}
/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
/* Out-of-line add/remove helpers (defined elsewhere, e.g. kernel/sched/wait.c). */
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
  131. static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  132. {
  133. list_add(&wq_entry->entry, &wq_head->head);
  134. }
  135. /*
  136. * Used for wake-one threads:
  137. */
  138. static inline void
  139. __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  140. {
  141. wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
  142. __add_wait_queue(wq_head, wq_entry);
  143. }
  144. static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  145. {
  146. list_add_tail(&wq_entry->entry, &wq_head->head);
  147. }
  148. static inline void
  149. __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  150. {
  151. wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
  152. __add_wait_queue_entry_tail(wq_head, wq_entry);
  153. }
  154. static inline void
  155. __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  156. {
  157. list_del(&wq_entry->entry);
  158. }
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);

/*
 * Wakeup shorthands: the mode argument selects eligible task states, the
 * nr argument is the wake count (wake_up_all() passes 0), and the _locked
 * variants are for callers that already hold wq_head->lock.
 */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 * @m is passed through to the wake functions as the @key argument
 * (typically a poll event mask -- cast to void *).
 */
#define wake_up_poll(x, m) \
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
  184. #define ___wait_cond_timeout(condition) \
  185. ({ \
  186. bool __cond = (condition); \
  187. if (__cond && !__ret) \
  188. __ret = 1; \
  189. __cond || !__ret; \
  190. })
  191. #define ___wait_is_interruptible(state) \
  192. (!__builtin_constant_p(state) || \
  193. state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
  194. extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */
/*
 * Core wait loop: prepare the entry, test @condition, bail out on a
 * pending signal for interruptible @state, otherwise run @cmd (typically
 * schedule()) and repeat.  On the signal path we jump past finish_wait();
 * prepare_to_wait_event() is expected to have cleaned up in that case.
 */
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
({ \
	__label__ __out; \
	struct wait_queue_entry __wq_entry; \
	long __ret = ret;	/* explicit shadow */ \
	\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
	for (;;) { \
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
		\
		if (condition) \
			break; \
		\
		if (___wait_is_interruptible(state) && __int) { \
			__ret = __int; \
			goto __out; \
		} \
		\
		cmd; \
	} \
	finish_wait(&wq_head, &__wq_entry); \
__out:	__ret; \
})
#define __wait_event(wq_head, condition) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition) \
do { \
	might_sleep(); \
	/* fast path: skip queueing entirely if the condition already holds */ \
	if (condition) \
		break; \
	__wait_event(wq_head, condition); \
} while (0)
#define __io_wait_event(wq_head, condition) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition) \
do { \
	might_sleep(); \
	if (condition) \
		break; \
	__io_wait_event(wq_head, condition); \
} while (0)
#define __wait_event_freezable(wq_head, condition) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_freezable(wq_head, condition); \
	__ret; \
})
#define __wait_event_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_UNINTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_timeout(wq_head, condition, timeout); \
	__ret; \
})
#define __wait_event_freezable_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_INTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.  Same return convention as
 * wait_event_timeout().
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret; \
})
#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
			    cmd1; schedule(); cmd2)

/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
do { \
	if (condition) \
		break; \
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2) \
do { \
	if (condition) \
		break; \
	__wait_event_cmd(wq_head, condition, cmd1, cmd2); \
} while (0)
#define __wait_event_interruptible(wq_head, condition) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_interruptible(wq_head, condition); \
	__ret; \
})
#define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_INTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_interruptible_timeout(wq_head, \
						condition, timeout); \
	__ret; \
})
/*
 * hrtimer-based wait: arm an on-stack sleeper (unless @timeout is
 * KTIME_MAX), then run the core wait loop.  A cleared __t.task signals
 * that the timer fired, turning the wait into -ETIME.
 */
#define __wait_event_hrtimeout(wq_head, condition, timeout, state) \
({ \
	int __ret = 0; \
	struct hrtimer_sleeper __t; \
	\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); \
	hrtimer_init_sleeper(&__t, current); \
	if ((timeout) != KTIME_MAX) \
		hrtimer_start_range_ns(&__t.timer, timeout, \
				       current->timer_slack_ns, \
				       HRTIMER_MODE_REL); \
	\
	__ret = ___wait_event(wq_head, condition, state, 0, 0, \
		if (!__t.task) { \
			__ret = -ETIME; \
			break; \
		} \
		schedule()); \
	\
	hrtimer_cancel(&__t.timer); \
	destroy_hrtimer_on_stack(&__t.timer); \
	__ret; \
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  (Note: uninterruptible sleep -- signals
 * are not handled by this variant; see the _interruptible version below.)
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
					       TASK_UNINTERRUPTIBLE); \
	__ret; \
})
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
	long __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_INTERRUPTIBLE); \
	__ret; \
})
#define __wait_event_interruptible_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
		      schedule())

/* Exclusive (wake-one) interruptible wait; see wait_event_interruptible(). */
#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_interruptible_exclusive(wq, condition); \
	__ret; \
})

#define __wait_event_killable_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
		      schedule())

/* Exclusive wait woken only by fatal signals (TASK_KILLABLE). */
#define wait_event_killable_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_killable_exclusive(wq, condition); \
	__ret; \
})

#define __wait_event_freezable_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
		      schedule(); try_to_freeze())

/* Exclusive, interruptible and freezable wait; see wait_event_freezable(). */
#define wait_event_freezable_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_freezable_exclusive(wq, condition); \
	__ret; \
})
extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

/*
 * Wait-loop helper for the *_locked variants below: the caller holds the
 * waitqueue lock, @fn performs one queue-and-sleep step (releasing the
 * lock while sleeping, per the kernel-doc below) and returns nonzero to
 * abort the wait (e.g. on a signal).
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
({ \
	int __ret; \
	DEFINE_WAIT(__wait); \
	if (exclusive) \
		__wait.flags |= WQ_FLAG_EXCLUSIVE; \
	do { \
		__ret = fn(&(wq), &__wait); \
		if (__ret) \
			break; \
	} while (!(condition)); \
	__remove_wait_queue(&(wq), &__wait); \
	__set_current_state(TASK_RUNNING); \
	__ret; \
})
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up no further queued processes are
 * considered for that wakeup.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up no further queued processes are
 * considered for that wakeup.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
#define __wait_event_killable(wq, condition) \
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_killable(wq_head, condition); \
	__ret; \
})
  688. #define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
  689. (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  690. spin_unlock_irq(&lock); \
  691. cmd; \
  692. schedule(); \
  693. spin_lock_irq(&lock))
  694. /**
  695. * wait_event_lock_irq_cmd - sleep until a condition gets true. The
  696. * condition is checked under the lock. This
  697. * is expected to be called with the lock
  698. * taken.
  699. * @wq_head: the waitqueue to wait on
  700. * @condition: a C expression for the event to wait for
  701. * @lock: a locked spinlock_t, which will be released before cmd
  702. * and schedule() and reacquired afterwards.
  703. * @cmd: a command which is invoked outside the critical section before
  704. * sleep
  705. *
  706. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  707. * @condition evaluates to true. The @condition is checked each time
  708. * the waitqueue @wq_head is woken up.
  709. *
  710. * wake_up() has to be called after changing any variable that could
  711. * change the result of the wait condition.
  712. *
  713. * This is supposed to be called while holding the lock. The lock is
  714. * dropped before invoking the cmd and going to sleep and is reacquired
  715. * afterwards.
  716. */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)		\
do {									\
	if (condition)	/* fast path: condition already true */		\
		break;							\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);		\
} while (0)
  723. /**
  724. * wait_event_lock_irq - sleep until a condition gets true. The
  725. * condition is checked under the lock. This
  726. * is expected to be called with the lock
  727. * taken.
  728. * @wq_head: the waitqueue to wait on
  729. * @condition: a C expression for the event to wait for
  730. * @lock: a locked spinlock_t, which will be released before schedule()
  731. * and reacquired afterwards.
  732. *
  733. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  734. * @condition evaluates to true. The @condition is checked each time
  735. * the waitqueue @wq_head is woken up.
  736. *
  737. * wake_up() has to be called after changing any variable that could
  738. * change the result of the wait condition.
  739. *
  740. * This is supposed to be called while holding the lock. The lock is
  741. * dropped before going to sleep and is reacquired afterwards.
  742. */
#define wait_event_lock_irq(wq_head, condition, lock)			\
do {									\
	if (condition)	/* fast path: condition already true */		\
		break;							\
	/* trailing comma passes an empty @cmd argument */		\
	__wait_event_lock_irq(wq_head, condition, lock, );		\
} while (0)
/*
 * Low-level helper: interruptible variant of __wait_event_lock_irq().
 * @lock is held on entry and exit and dropped around @cmd and
 * schedule().  Evaluates to 0 or -ERESTARTSYS (see the kernel-doc of
 * the wrappers below).
 */
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,	\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))
  755. /**
  756. * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
  757. * The condition is checked under the lock. This is expected to
  758. * be called with the lock taken.
  759. * @wq_head: the waitqueue to wait on
  760. * @condition: a C expression for the event to wait for
  761. * @lock: a locked spinlock_t, which will be released before cmd and
  762. * schedule() and reacquired afterwards.
  763. * @cmd: a command which is invoked outside the critical section before
  764. * sleep
  765. *
  766. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  767. * @condition evaluates to true or a signal is received. The @condition is
  768. * checked each time the waitqueue @wq_head is woken up.
  769. *
  770. * wake_up() has to be called after changing any variable that could
  771. * change the result of the wait condition.
  772. *
  773. * This is supposed to be called while holding the lock. The lock is
  774. * dropped before invoking the cmd and going to sleep and is reacquired
  775. * afterwards.
  776. *
  777. * The macro will return -ERESTARTSYS if it was interrupted by a signal
  778. * and 0 if @condition evaluated to true.
  779. */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
({									\
	int __ret = 0;							\
	if (!(condition))	/* fast path: skip the wait entirely */	\
		__ret = __wait_event_interruptible_lock_irq(wq_head,	\
						condition, lock, cmd);	\
	__ret;								\
})
  788. /**
  789. * wait_event_interruptible_lock_irq - sleep until a condition gets true.
  790. * The condition is checked under the lock. This is expected
  791. * to be called with the lock taken.
  792. * @wq_head: the waitqueue to wait on
  793. * @condition: a C expression for the event to wait for
  794. * @lock: a locked spinlock_t, which will be released before schedule()
  795. * and reacquired afterwards.
  796. *
  797. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
  799. * checked each time the waitqueue @wq_head is woken up.
  800. *
  801. * wake_up() has to be called after changing any variable that could
  802. * change the result of the wait condition.
  803. *
  804. * This is supposed to be called while holding the lock. The lock is
  805. * dropped before going to sleep and is reacquired afterwards.
  806. *
  807. * The macro will return -ERESTARTSYS if it was interrupted by a signal
  808. * and 0 if @condition evaluated to true.
  809. */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)	\
({									\
	int __ret = 0;							\
	if (!(condition))	/* fast path: skip the wait entirely */	\
		__ret = __wait_event_interruptible_lock_irq(wq_head,	\
				/* empty @cmd */ condition, lock,);	\
	__ret;								\
})
  818. #define __wait_event_interruptible_lock_irq_timeout(wq_head, condition, \
  819. lock, timeout) \
  820. ___wait_event(wq_head, ___wait_cond_timeout(condition), \
  821. TASK_INTERRUPTIBLE, 0, timeout, \
  822. spin_unlock_irq(&lock); \
  823. __ret = schedule_timeout(__ret); \
  824. spin_lock_irq(&lock));
  825. /**
  826. * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
  827. * true or a timeout elapses. The condition is checked under
  828. * the lock. This is expected to be called with the lock taken.
  829. * @wq_head: the waitqueue to wait on
  830. * @condition: a C expression for the event to wait for
  831. * @lock: a locked spinlock_t, which will be released before schedule()
  832. * and reacquired afterwards.
  833. * @timeout: timeout, in jiffies
  834. *
  835. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true, a signal is received, or the @timeout
 * elapses.  The @condition is checked each time the waitqueue @wq_head
 * is woken up.
  838. *
  839. * wake_up() has to be called after changing any variable that could
  840. * change the result of the wait condition.
  841. *
  842. * This is supposed to be called while holding the lock. The lock is
  843. * dropped before going to sleep and is reacquired afterwards.
  844. *
  845. * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
  846. * was interrupted by a signal, and the remaining jiffies otherwise
  847. * if the condition evaluated to true before the timeout elapsed.
  848. */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
						  timeout)		\
({									\
	long __ret = timeout;	/* full timeout if condition holds */	\
	if (!___wait_cond_timeout(condition))	/* fast path check */	\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq_head, condition, lock, timeout); \
	__ret;								\
})
  858. /*
  859. * Waitqueues which are removed from the waitqueue_head at wakeup time
  860. */
/* Queue @wq_entry on @wq_head and set the task state to @state. */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
/* As prepare_to_wait(), but queued as an exclusive waiter. */
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
/* Variant used by the wait_event*() macros; returns a wait/abort code. */
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
/* Undo prepare_to_wait*(): restore task state and dequeue if needed. */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
/* Sleep up to @timeout after a woken_wake_function()-style wakeup. */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
/* Wake callbacks (the latter removes the entry from the queue on wake). */
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
/*
 * DEFINE_WAIT_FUNC - declare an on-stack wait_queue_entry for the
 * current task, with @function as its wake callback.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	struct wait_queue_entry name = {				\
		.private	= current,				\
		.func		= function,				\
		.entry		= LIST_HEAD_INIT((name).entry),		\
	}
  874. #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
/*
 * init_wait - (re)initialize an existing wait_queue_entry for the
 * current task, using autoremove_wake_function() as the wake callback
 * and clearing all flags.
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->entry);				\
		(wait)->flags = 0;					\
	} while (0)
  882. #endif /* _LINUX_WAIT_H */