  1. #ifndef _LINUX_WAIT_H
  2. #define _LINUX_WAIT_H
  3. /*
  4. * Linux wait queue related types and methods
  5. */
  6. #include <linux/list.h>
  7. #include <linux/stddef.h>
  8. #include <linux/spinlock.h>
  9. #include <asm/current.h>
  10. #include <uapi/linux/wait.h>
/* A wait queue entry and the callback type used to wake it. */
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/*
 * One entry on a wait queue.  @private normally points at the sleeping
 * task and @func is called to wake it (default_wake_function unless the
 * user installed a custom callback).
 */
struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01	/* entry participates in wake-one semantics */
	void *private;			/* typically the waiting task_struct */
	wait_queue_func_t func;		/* wakeup callback */
	struct list_head task_list;	/* link into the head's task_list */
};
/*
 * Key describing a bit wait: @flags points at the word containing the
 * bit, @bit_nr is the bit number (or WAIT_ATOMIC_T_BIT_NR when waiting
 * on an atomic_t), and @timeout optionally carries a jiffies deadline.
 */
struct wait_bit_key {
	void *flags;
	int bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
	unsigned long timeout;
};

/* A wait queue entry bundled with the bit it is waiting on. */
struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};
/* Head of a wait queue: a spinlock protecting the list of entries. */
struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;
/*
 * Macros for declaration and initialisation of the datatypes
 */

/* Static initializer for a wait queue entry owned by task @tsk. */
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private	= tsk, \
	.func		= default_wake_function, \
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a wait queue head: unlocked, empty list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock), \
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

/* Initializers for bit-wait keys (word/bit pair, or an atomic_t). */
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

/*
 * Run-time initialisation of a wait queue head.  The static
 * lock_class_key gives lockdep a distinct lock class per call site.
 */
#define init_waitqueue_head(q) \
	do { \
		static struct lock_class_key __key; \
 \
		__init_waitqueue_head((q), #q, &__key); \
	} while (0)

#ifdef CONFIG_LOCKDEP
/*
 * Under lockdep, on-stack heads must be initialised at run time so the
 * lock class is keyed by the declaration site.
 */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
  70. static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
  71. {
  72. q->flags = 0;
  73. q->private = p;
  74. q->func = default_wake_function;
  75. }
  76. static inline void
  77. init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
  78. {
  79. q->flags = 0;
  80. q->private = NULL;
  81. q->func = func;
  82. }
  83. static inline int waitqueue_active(wait_queue_head_t *q)
  84. {
  85. return !list_empty(&q->task_list);
  86. }
/* Locked add/remove helpers: these take q->lock internally. */
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
  90. static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
  91. {
  92. list_add(&new->task_list, &head->task_list);
  93. }
  94. /*
  95. * Used for wake-one threads:
  96. */
  97. static inline void
  98. __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
  99. {
  100. wait->flags |= WQ_FLAG_EXCLUSIVE;
  101. __add_wait_queue(q, wait);
  102. }
  103. static inline void __add_wait_queue_tail(wait_queue_head_t *head,
  104. wait_queue_t *new)
  105. {
  106. list_add_tail(&new->task_list, &head->task_list);
  107. }
  108. static inline void
  109. __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
  110. {
  111. wait->flags |= WQ_FLAG_EXCLUSIVE;
  112. __add_wait_queue_tail(q, wait);
  113. }
  114. static inline void
  115. __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
  116. {
  117. list_del(&old->task_list);
  118. }
/* Action run while waiting for a bit; returns nonzero to abort the wait. */
typedef int wait_bit_action_f(struct wait_bit_key *);

/* Core wakeup primitives; @nr is the number of exclusive waiters to wake. */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);

/* Bit-wait primitives; out_of_line_* variants avoid inlining at call sites. */
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);

/* Map a word/bit pair to its shared hashed wait queue head. */
wait_queue_head_t *bit_waitqueue(void *, int);
/*
 * Wakeup convenience macros.  TASK_NORMAL wakes both interruptible and
 * uninterruptible sleepers; the _interruptible forms wake only
 * TASK_INTERRUPTIBLE ones.  An nr of 0 means "wake all exclusive
 * waiters"; the _locked forms require the caller to already hold the
 * wait queue lock.
 */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 * @m is forwarded as the @key argument to each waiter's wake function
 * (used e.g. to pass poll event masks).
 */
#define wake_up_poll(x, m) \
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
/*
 * Wrap @condition for the *_timeout wait variants.  Requires a __ret
 * variable (the remaining timeout in jiffies) to be in scope at the
 * expansion site.  If the condition became true just as the timeout
 * reached zero, __ret is bumped to 1 so the caller can still tell
 * success apart from expiry.  Evaluates true when the wait should
 * stop: condition satisfied, or no time left.
 */
#define ___wait_cond_timeout(condition) \
({ \
	bool __cond = (condition); \
	if (__cond && !__ret) \
		__ret = 1; \
	__cond || !__ret; \
})
  162. #define ___wait_is_interruptible(state) \
  163. (!__builtin_constant_p(state) || \
  164. state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */
#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
({ \
	__label__ __out; \
	wait_queue_t __wait; \
	long __ret = ret;	/* explicit shadow */ \
 \
	INIT_LIST_HEAD(&__wait.task_list); \
	if (exclusive) \
		__wait.flags = WQ_FLAG_EXCLUSIVE; \
	else \
		__wait.flags = 0; \
 \
	for (;;) { \
		/* (re)queue and set task state; nonzero __int means a
		 * pending signal was noticed — NOTE(review): presumed
		 * contract of prepare_to_wait_event(), confirm there. */ \
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
 \
		if (condition) \
			break; \
 \
		if (___wait_is_interruptible(state) && __int) { \
			__ret = __int; \
			if (exclusive) { \
				abort_exclusive_wait(&wq, &__wait, \
						     state, NULL); \
				goto __out; \
			} \
			break; \
		} \
 \
		cmd; \
	} \
	finish_wait(&wq, &__wait); \
__out:	__ret; \
})
#define __wait_event(wq, condition) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
	if (condition) \
		break; \
	__wait_event(wq, condition); \
} while (0)
#define __wait_event_timeout(wq, condition, timeout) \
	___wait_event(wq, ___wait_cond_timeout(condition), \
		      TASK_UNINTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_timeout(wq, condition, timeout); \
	__ret; \
})
#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2) \
do { \
	if (condition) \
		break; \
	__wait_event_cmd(wq, condition, cmd1, cmd2); \
} while (0)
#define __wait_event_interruptible(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible(wq, condition); \
	__ret; \
})
#define __wait_event_interruptible_timeout(wq, condition, timeout) \
	___wait_event(wq, ___wait_cond_timeout(condition), \
		      TASK_INTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_interruptible_timeout(wq, \
						condition, timeout); \
	__ret; \
})
/*
 * Sleep in @state until @condition is true or the hrtimer expires.
 * __t.task is cleared when the timer fires (see hrtimer_init_sleeper());
 * that is detected here and reported as -ETIME.  A @timeout of
 * KTIME_MAX means "no timeout" and the timer is never started.
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state) \
({ \
	int __ret = 0; \
	struct hrtimer_sleeper __t; \
 \
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
			      HRTIMER_MODE_REL); \
	hrtimer_init_sleeper(&__t, current); \
	if ((timeout).tv64 != KTIME_MAX) \
		hrtimer_start_range_ns(&__t.timer, timeout, \
				       current->timer_slack_ns, \
				       HRTIMER_MODE_REL); \
 \
	__ret = ___wait_event(wq, condition, state, 0, 0, \
		if (!__t.task) { \
			__ret = -ETIME; \
			break; \
		} \
		schedule()); \
 \
	hrtimer_cancel(&__t.timer); \
	destroy_hrtimer_on_stack(&__t.timer); \
	__ret; \
})
/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses; being
 * uninterruptible, this sleep is not broken by signals.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_UNINTERRUPTIBLE); \
	__ret; \
})
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
	long __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_INTERRUPTIBLE); \
	__ret; \
})
/*
 * Exclusive (wake-one) interruptible wait.  Returns 0 when @condition
 * became true, or the negative error from the signal check.
 */
#define __wait_event_interruptible_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret; \
})
/*
 * Common body for the wait_event_interruptible*locked* variants.
 * Entered with (wq).lock held; the lock is dropped only around
 * schedule() — @condition is always tested with the lock held, and the
 * lock is held again on exit.  @exclusive queues the waiter with
 * WQ_FLAG_EXCLUSIVE; @irq selects spin_{un}lock_irq() vs spin_{un}lock().
 * Returns 0, or -ERESTARTSYS if a signal was pending.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
	int __ret = 0; \
	DEFINE_WAIT(__wait); \
	if (exclusive) \
		__wait.flags |= WQ_FLAG_EXCLUSIVE; \
	do { \
		/* re-add only if a wakeup removed us from the list */ \
		if (likely(list_empty(&__wait.task_list))) \
			__add_wait_queue_tail(&(wq), &__wait); \
		set_current_state(TASK_INTERRUPTIBLE); \
		if (signal_pending(current)) { \
			__ret = -ERESTARTSYS; \
			break; \
		} \
		if (irq) \
			spin_unlock_irq(&(wq).lock); \
		else \
			spin_unlock(&(wq).lock); \
		schedule(); \
		if (irq) \
			spin_lock_irq(&(wq).lock); \
		else \
			spin_lock(&(wq).lock); \
	} while (!(condition)); \
	__remove_wait_queue(&(wq), &__wait); \
	__set_current_state(TASK_RUNNING); \
	__ret; \
})
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so that when a wakeup reaches this waiter, no further
 * (exclusive) waiters after it are considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so that when a wakeup reaches this waiter, no further
 * (exclusive) waiters after it are considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
#define __wait_event_killable(wq, condition) \
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_killable(wq, condition); \
	__ret; \
})
/* Drops @lock (IRQ variant) around @cmd and schedule(); retakes it after. */
#define __wait_event_lock_irq(wq, condition, lock, cmd) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    spin_unlock_irq(&lock); \
			    cmd; \
			    schedule(); \
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, ); \
} while (0)
/* Interruptible flavour: drops @lock around @cmd and schedule(). */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      spin_unlock_irq(&lock); \
		      cmd; \
		      schedule(); \
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_lock_irq(wq, \
						condition, lock, cmd); \
	__ret; \
})
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_lock_irq(wq, \
						condition, lock,); \
	__ret; \
})
  712. #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
  713. lock, timeout) \
  714. ___wait_event(wq, ___wait_cond_timeout(condition), \
  715. TASK_INTERRUPTIBLE, 0, timeout, \
  716. spin_unlock_irq(&lock); \
  717. __ret = schedule_timeout(__ret); \
  718. spin_lock_irq(&lock));
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;	/* full budget if no sleep needed */	\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
/* Prepare-to-wait / finish-wait primitives. */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
/* Default wake functions used by DEFINE_WAIT() / DEFINE_WAIT_BIT() below. */
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/*
 * DEFINE_WAIT_FUNC - declare an on-stack wait_queue_t for the current
 * task, woken through @function.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

/* Common case: the entry removes itself from the waitqueue at wakeup. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
/*
 * DEFINE_WAIT_BIT - declare an on-stack wait_bit_queue keyed on @bit of
 * @word for the current task, woken through wake_bit_function().
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key	= __WAIT_BIT_KEY_INITIALIZER(word, bit),	\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
/*
 * init_wait - (re)initialize an existing wait_queue_t for the current
 * task with the autoremove wake function; runtime counterpart of
 * DEFINE_WAIT().
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
/*
 * Stock @action callbacks for the wait_on_bit*() helpers below
 * (bit_wait is used by wait_on_bit(), bit_wait_io by wait_on_bit_io()).
 */
extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);
extern int bit_wait_timeout(struct wait_bit_key *);
extern int bit_wait_io_timeout(struct wait_bit_key *);
  790. /**
  791. * wait_on_bit - wait for a bit to be cleared
  792. * @word: the word being waited on, a kernel virtual address
  793. * @bit: the bit of the word being waited on
  794. * @mode: the task state to sleep in
  795. *
  796. * There is a standard hashed waitqueue table for generic use. This
  797. * is the part of the hashtable's accessor API that waits on a bit.
  798. * For instance, if one were to have waiters on a bitflag, one would
  799. * call wait_on_bit() in threads waiting for the bit to clear.
  800. * One uses wait_on_bit() where one is waiting for the bit to clear,
  801. * but has no intention of setting it.
  802. * Returned value will be zero if the bit was cleared, or non-zero
  803. * if the process received a signal and the mode permitted wakeup
  804. * on that signal.
  805. */
  806. static inline int
  807. wait_on_bit(void *word, int bit, unsigned mode)
  808. {
  809. if (!test_bit(bit, word))
  810. return 0;
  811. return out_of_line_wait_on_bit(word, bit,
  812. bit_wait,
  813. mode);
  814. }
  815. /**
  816. * wait_on_bit_io - wait for a bit to be cleared
  817. * @word: the word being waited on, a kernel virtual address
  818. * @bit: the bit of the word being waited on
  819. * @mode: the task state to sleep in
  820. *
  821. * Use the standard hashed waitqueue table to wait for a bit
  822. * to be cleared. This is similar to wait_on_bit(), but calls
  823. * io_schedule() instead of schedule() for the actual waiting.
  824. *
  825. * Returned value will be zero if the bit was cleared, or non-zero
  826. * if the process received a signal and the mode permitted wakeup
  827. * on that signal.
  828. */
  829. static inline int
  830. wait_on_bit_io(void *word, int bit, unsigned mode)
  831. {
  832. if (!test_bit(bit, word))
  833. return 0;
  834. return out_of_line_wait_on_bit(word, bit,
  835. bit_wait_io,
  836. mode);
  837. }
  838. /**
  839. * wait_on_bit_action - wait for a bit to be cleared
  840. * @word: the word being waited on, a kernel virtual address
  841. * @bit: the bit of the word being waited on
  842. * @action: the function used to sleep, which may take special actions
  843. * @mode: the task state to sleep in
  844. *
  845. * Use the standard hashed waitqueue table to wait for a bit
  846. * to be cleared, and allow the waiting action to be specified.
  847. * This is like wait_on_bit() but allows fine control of how the waiting
  848. * is done.
  849. *
  850. * Returned value will be zero if the bit was cleared, or non-zero
  851. * if the process received a signal and the mode permitted wakeup
  852. * on that signal.
  853. */
  854. static inline int
  855. wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
  856. {
  857. if (!test_bit(bit, word))
  858. return 0;
  859. return out_of_line_wait_on_bit(word, bit, action, mode);
  860. }
  861. /**
  862. * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
  863. * @word: the word being waited on, a kernel virtual address
  864. * @bit: the bit of the word being waited on
  865. * @mode: the task state to sleep in
  866. *
  867. * There is a standard hashed waitqueue table for generic use. This
  868. * is the part of the hashtable's accessor API that waits on a bit
  869. * when one intends to set it, for instance, trying to lock bitflags.
  870. * For instance, if one were to have waiters trying to set bitflag
  871. * and waiting for it to clear before setting it, one would call
  872. * wait_on_bit() in threads waiting to be able to set the bit.
  873. * One uses wait_on_bit_lock() where one is waiting for the bit to
  874. * clear with the intention of setting it, and when done, clearing it.
  875. *
  876. * Returns zero if the bit was (eventually) found to be clear and was
  877. * set. Returns non-zero if a signal was delivered to the process and
  878. * the @mode allows that signal to wake the process.
  879. */
  880. static inline int
  881. wait_on_bit_lock(void *word, int bit, unsigned mode)
  882. {
  883. if (!test_and_set_bit(bit, word))
  884. return 0;
  885. return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
  886. }
  887. /**
  888. * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
  889. * @word: the word being waited on, a kernel virtual address
  890. * @bit: the bit of the word being waited on
  891. * @mode: the task state to sleep in
  892. *
  893. * Use the standard hashed waitqueue table to wait for a bit
  894. * to be cleared and then to atomically set it. This is similar
  895. * to wait_on_bit(), but calls io_schedule() instead of schedule()
  896. * for the actual waiting.
  897. *
  898. * Returns zero if the bit was (eventually) found to be clear and was
  899. * set. Returns non-zero if a signal was delivered to the process and
  900. * the @mode allows that signal to wake the process.
  901. */
  902. static inline int
  903. wait_on_bit_lock_io(void *word, int bit, unsigned mode)
  904. {
  905. if (!test_and_set_bit(bit, word))
  906. return 0;
  907. return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
  908. }
  909. /**
  910. * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
  911. * @word: the word being waited on, a kernel virtual address
  912. * @bit: the bit of the word being waited on
  913. * @action: the function used to sleep, which may take special actions
  914. * @mode: the task state to sleep in
  915. *
  916. * Use the standard hashed waitqueue table to wait for a bit
  917. * to be cleared and then to set it, and allow the waiting action
  918. * to be specified.
  919. * This is like wait_on_bit() but allows fine control of how the waiting
  920. * is done.
  921. *
  922. * Returns zero if the bit was (eventually) found to be clear and was
  923. * set. Returns non-zero if a signal was delivered to the process and
  924. * the @mode allows that signal to wake the process.
  925. */
  926. static inline int
  927. wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
  928. {
  929. if (!test_and_set_bit(bit, word))
  930. return 0;
  931. return out_of_line_wait_on_bit_lock(word, bit, action, mode);
  932. }
  933. /**
  934. * wait_on_atomic_t - Wait for an atomic_t to become 0
  935. * @val: The atomic value being waited on, a kernel virtual address
  936. * @action: the function used to sleep, which may take special actions
  937. * @mode: the task state to sleep in
  938. *
  939. * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
  940. * the purpose of getting a waitqueue, but we set the key to a bit number
  941. * outside of the target 'word'.
  942. */
  943. static inline
  944. int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
  945. {
  946. if (atomic_read(val) == 0)
  947. return 0;
  948. return out_of_line_wait_on_atomic_t(val, action, mode);
  949. }
  950. #endif /* _LINUX_WAIT_H */