wait.h 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222
  1. #ifndef _LINUX_WAIT_H
  2. #define _LINUX_WAIT_H
  3. /*
  4. * Linux wait queue related types and methods
  5. */
  6. #include <linux/list.h>
  7. #include <linux/stddef.h>
  8. #include <linux/spinlock.h>
  9. #include <asm/current.h>
  10. #include <uapi/linux/wait.h>
  11. typedef struct __wait_queue wait_queue_t;
  12. typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
  13. int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
  14. /* __wait_queue::flags */
  15. #define WQ_FLAG_EXCLUSIVE 0x01
  16. #define WQ_FLAG_WOKEN 0x02
  17. struct __wait_queue {
  18. unsigned int flags;
  19. void *private;
  20. wait_queue_func_t func;
  21. struct list_head task_list;
  22. };
  23. struct wait_bit_key {
  24. void *flags;
  25. int bit_nr;
  26. #define WAIT_ATOMIC_T_BIT_NR -1
  27. unsigned long timeout;
  28. };
  29. struct wait_bit_queue {
  30. struct wait_bit_key key;
  31. wait_queue_t wait;
  32. };
  33. struct __wait_queue_head {
  34. spinlock_t lock;
  35. struct list_head task_list;
  36. };
  37. typedef struct __wait_queue_head wait_queue_head_t;
  38. struct task_struct;
  39. /*
  40. * Macros for declaration and initialisaton of the datatypes
  41. */
  42. #define __WAITQUEUE_INITIALIZER(name, tsk) { \
  43. .private = tsk, \
  44. .func = default_wake_function, \
  45. .task_list = { NULL, NULL } }
  46. #define DECLARE_WAITQUEUE(name, tsk) \
  47. wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
  48. #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
  49. .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
  50. .task_list = { &(name).task_list, &(name).task_list } }
  51. #define DECLARE_WAIT_QUEUE_HEAD(name) \
  52. wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
  53. #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
  54. { .flags = word, .bit_nr = bit, }
  55. #define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
  56. { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
  57. extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
  58. #define init_waitqueue_head(q) \
  59. do { \
  60. static struct lock_class_key __key; \
  61. \
  62. __init_waitqueue_head((q), #q, &__key); \
  63. } while (0)
  64. #ifdef CONFIG_LOCKDEP
  65. # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
  66. ({ init_waitqueue_head(&name); name; })
  67. # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
  68. wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
  69. #else
  70. # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
  71. #endif
  72. static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
  73. {
  74. q->flags = 0;
  75. q->private = p;
  76. q->func = default_wake_function;
  77. }
  78. static inline void
  79. init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
  80. {
  81. q->flags = 0;
  82. q->private = NULL;
  83. q->func = func;
  84. }
  85. /**
  86. * waitqueue_active -- locklessly test for waiters on the queue
  87. * @q: the waitqueue to test for waiters
  88. *
  89. * returns true if the wait list is not empty
  90. *
  91. * NOTE: this function is lockless and requires care, incorrect usage _will_
  92. * lead to sporadic and non-obvious failure.
  93. *
  94. * Use either while holding wait_queue_head_t::lock or when used for wakeups
  95. * with an extra smp_mb() like:
  96. *
  97. * CPU0 - waker CPU1 - waiter
  98. *
  99. * for (;;) {
  100. * @cond = true; prepare_to_wait(&wq, &wait, state);
  101. * smp_mb(); // smp_mb() from set_current_state()
  102. * if (waitqueue_active(wq)) if (@cond)
  103. * wake_up(wq); break;
  104. * schedule();
  105. * }
  106. * finish_wait(&wq, &wait);
  107. *
  108. * Because without the explicit smp_mb() it's possible for the
  109. * waitqueue_active() load to get hoisted over the @cond store such that we'll
  110. * observe an empty wait list while the waiter might not observe @cond.
  111. *
  112. * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
  113. * which (when the lock is uncontended) are of roughly equal cost.
  114. */
  115. static inline int waitqueue_active(wait_queue_head_t *q)
  116. {
  117. return !list_empty(&q->task_list);
  118. }
  119. /**
  120. * wq_has_sleeper - check if there are any waiting processes
  121. * @wq: wait queue head
  122. *
  123. * Returns true if wq has waiting processes
  124. *
  125. * Please refer to the comment for waitqueue_active.
  126. */
  127. static inline bool wq_has_sleeper(wait_queue_head_t *wq)
  128. {
  129. /*
  130. * We need to be sure we are in sync with the
  131. * add_wait_queue modifications to the wait queue.
  132. *
  133. * This memory barrier should be paired with one on the
  134. * waiting side.
  135. */
  136. smp_mb();
  137. return waitqueue_active(wq);
  138. }
  139. extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
  140. extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
  141. extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
  142. static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
  143. {
  144. list_add(&new->task_list, &head->task_list);
  145. }
  146. /*
  147. * Used for wake-one threads:
  148. */
  149. static inline void
  150. __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
  151. {
  152. wait->flags |= WQ_FLAG_EXCLUSIVE;
  153. __add_wait_queue(q, wait);
  154. }
  155. static inline void __add_wait_queue_tail(wait_queue_head_t *head,
  156. wait_queue_t *new)
  157. {
  158. list_add_tail(&new->task_list, &head->task_list);
  159. }
  160. static inline void
  161. __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
  162. {
  163. wait->flags |= WQ_FLAG_EXCLUSIVE;
  164. __add_wait_queue_tail(q, wait);
  165. }
  166. static inline void
  167. __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
  168. {
  169. list_del(&old->task_list);
  170. }
  171. typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
  172. void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
  173. void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
  174. void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
  175. void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
  176. void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
  177. void __wake_up_bit(wait_queue_head_t *, void *, int);
  178. int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
  179. int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
  180. void wake_up_bit(void *, int);
  181. void wake_up_atomic_t(atomic_t *);
  182. int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
  183. int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
  184. int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
  185. int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
  186. wait_queue_head_t *bit_waitqueue(void *, int);
  187. #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
  188. #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
  189. #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
  190. #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
  191. #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
  192. #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
  193. #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
  194. #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
  195. #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
  196. /*
  197. * Wakeup macros to be used to report events to the targets.
  198. */
  199. #define wake_up_poll(x, m) \
  200. __wake_up(x, TASK_NORMAL, 1, (void *) (m))
  201. #define wake_up_locked_poll(x, m) \
  202. __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
  203. #define wake_up_interruptible_poll(x, m) \
  204. __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
  205. #define wake_up_interruptible_sync_poll(x, m) \
  206. __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
  207. #define ___wait_cond_timeout(condition) \
  208. ({ \
  209. bool __cond = (condition); \
  210. if (__cond && !__ret) \
  211. __ret = 1; \
  212. __cond || !__ret; \
  213. })
  214. #define ___wait_is_interruptible(state) \
  215. (!__builtin_constant_p(state) || \
  216. state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
  217. /*
  218. * The below macro ___wait_event() has an explicit shadow of the __ret
  219. * variable when used from the wait_event_*() macros.
  220. *
  221. * This is so that both can use the ___wait_cond_timeout() construct
  222. * to wrap the condition.
  223. *
  224. * The type inconsistency of the wait_event_*() __ret variable is also
  225. * on purpose; we use long where we can return timeout values and int
  226. * otherwise.
  227. */
  228. #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
  229. ({ \
  230. __label__ __out; \
  231. wait_queue_t __wait; \
  232. long __ret = ret; /* explicit shadow */ \
  233. \
  234. INIT_LIST_HEAD(&__wait.task_list); \
  235. if (exclusive) \
  236. __wait.flags = WQ_FLAG_EXCLUSIVE; \
  237. else \
  238. __wait.flags = 0; \
  239. \
  240. for (;;) { \
  241. long __int = prepare_to_wait_event(&wq, &__wait, state);\
  242. \
  243. if (condition) \
  244. break; \
  245. \
  246. if (___wait_is_interruptible(state) && __int) { \
  247. __ret = __int; \
  248. if (exclusive) { \
  249. abort_exclusive_wait(&wq, &__wait, \
  250. state, NULL); \
  251. goto __out; \
  252. } \
  253. break; \
  254. } \
  255. \
  256. cmd; \
  257. } \
  258. finish_wait(&wq, &__wait); \
  259. __out: __ret; \
  260. })
  261. #define __wait_event(wq, condition) \
  262. (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  263. schedule())
  264. /**
  265. * wait_event - sleep until a condition gets true
  266. * @wq: the waitqueue to wait on
  267. * @condition: a C expression for the event to wait for
  268. *
  269. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  270. * @condition evaluates to true. The @condition is checked each time
  271. * the waitqueue @wq is woken up.
  272. *
  273. * wake_up() has to be called after changing any variable that could
  274. * change the result of the wait condition.
  275. */
  276. #define wait_event(wq, condition) \
  277. do { \
  278. might_sleep(); \
  279. if (condition) \
  280. break; \
  281. __wait_event(wq, condition); \
  282. } while (0)
  283. #define __io_wait_event(wq, condition) \
  284. (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  285. io_schedule())
  286. /*
  287. * io_wait_event() -- like wait_event() but with io_schedule()
  288. */
  289. #define io_wait_event(wq, condition) \
  290. do { \
  291. might_sleep(); \
  292. if (condition) \
  293. break; \
  294. __io_wait_event(wq, condition); \
  295. } while (0)
  296. #define __wait_event_freezable(wq, condition) \
  297. ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
  298. schedule(); try_to_freeze())
  299. /**
  300. * wait_event_freezable - sleep (or freeze) until a condition gets true
  301. * @wq: the waitqueue to wait on
  302. * @condition: a C expression for the event to wait for
  303. *
  304. * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
  305. * to system load) until the @condition evaluates to true. The
  306. * @condition is checked each time the waitqueue @wq is woken up.
  307. *
  308. * wake_up() has to be called after changing any variable that could
  309. * change the result of the wait condition.
  310. */
  311. #define wait_event_freezable(wq, condition) \
  312. ({ \
  313. int __ret = 0; \
  314. might_sleep(); \
  315. if (!(condition)) \
  316. __ret = __wait_event_freezable(wq, condition); \
  317. __ret; \
  318. })
  319. #define __wait_event_timeout(wq, condition, timeout) \
  320. ___wait_event(wq, ___wait_cond_timeout(condition), \
  321. TASK_UNINTERRUPTIBLE, 0, timeout, \
  322. __ret = schedule_timeout(__ret))
  323. /**
  324. * wait_event_timeout - sleep until a condition gets true or a timeout elapses
  325. * @wq: the waitqueue to wait on
  326. * @condition: a C expression for the event to wait for
  327. * @timeout: timeout, in jiffies
  328. *
  329. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  330. * @condition evaluates to true. The @condition is checked each time
  331. * the waitqueue @wq is woken up.
  332. *
  333. * wake_up() has to be called after changing any variable that could
  334. * change the result of the wait condition.
  335. *
  336. * Returns:
  337. * 0 if the @condition evaluated to %false after the @timeout elapsed,
  338. * 1 if the @condition evaluated to %true after the @timeout elapsed,
  339. * or the remaining jiffies (at least 1) if the @condition evaluated
  340. * to %true before the @timeout elapsed.
  341. */
  342. #define wait_event_timeout(wq, condition, timeout) \
  343. ({ \
  344. long __ret = timeout; \
  345. might_sleep(); \
  346. if (!___wait_cond_timeout(condition)) \
  347. __ret = __wait_event_timeout(wq, condition, timeout); \
  348. __ret; \
  349. })
  350. #define __wait_event_freezable_timeout(wq, condition, timeout) \
  351. ___wait_event(wq, ___wait_cond_timeout(condition), \
  352. TASK_INTERRUPTIBLE, 0, timeout, \
  353. __ret = schedule_timeout(__ret); try_to_freeze())
  354. /*
  355. * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
  356. * increasing load and is freezable.
  357. */
  358. #define wait_event_freezable_timeout(wq, condition, timeout) \
  359. ({ \
  360. long __ret = timeout; \
  361. might_sleep(); \
  362. if (!___wait_cond_timeout(condition)) \
  363. __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
  364. __ret; \
  365. })
  366. #define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
  367. (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
  368. cmd1; schedule(); cmd2)
  369. /*
  370. * Just like wait_event_cmd(), except it sets exclusive flag
  371. */
  372. #define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
  373. do { \
  374. if (condition) \
  375. break; \
  376. __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2); \
  377. } while (0)
  378. #define __wait_event_cmd(wq, condition, cmd1, cmd2) \
  379. (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  380. cmd1; schedule(); cmd2)
  381. /**
  382. * wait_event_cmd - sleep until a condition gets true
  383. * @wq: the waitqueue to wait on
  384. * @condition: a C expression for the event to wait for
  385. * @cmd1: the command will be executed before sleep
  386. * @cmd2: the command will be executed after sleep
  387. *
  388. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  389. * @condition evaluates to true. The @condition is checked each time
  390. * the waitqueue @wq is woken up.
  391. *
  392. * wake_up() has to be called after changing any variable that could
  393. * change the result of the wait condition.
  394. */
  395. #define wait_event_cmd(wq, condition, cmd1, cmd2) \
  396. do { \
  397. if (condition) \
  398. break; \
  399. __wait_event_cmd(wq, condition, cmd1, cmd2); \
  400. } while (0)
  401. #define __wait_event_interruptible(wq, condition) \
  402. ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
  403. schedule())
  404. /**
  405. * wait_event_interruptible - sleep until a condition gets true
  406. * @wq: the waitqueue to wait on
  407. * @condition: a C expression for the event to wait for
  408. *
  409. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  410. * @condition evaluates to true or a signal is received.
  411. * The @condition is checked each time the waitqueue @wq is woken up.
  412. *
  413. * wake_up() has to be called after changing any variable that could
  414. * change the result of the wait condition.
  415. *
  416. * The function will return -ERESTARTSYS if it was interrupted by a
  417. * signal and 0 if @condition evaluated to true.
  418. */
  419. #define wait_event_interruptible(wq, condition) \
  420. ({ \
  421. int __ret = 0; \
  422. might_sleep(); \
  423. if (!(condition)) \
  424. __ret = __wait_event_interruptible(wq, condition); \
  425. __ret; \
  426. })
  427. #define __wait_event_interruptible_timeout(wq, condition, timeout) \
  428. ___wait_event(wq, ___wait_cond_timeout(condition), \
  429. TASK_INTERRUPTIBLE, 0, timeout, \
  430. __ret = schedule_timeout(__ret))
  431. /**
  432. * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
  433. * @wq: the waitqueue to wait on
  434. * @condition: a C expression for the event to wait for
  435. * @timeout: timeout, in jiffies
  436. *
  437. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  438. * @condition evaluates to true or a signal is received.
  439. * The @condition is checked each time the waitqueue @wq is woken up.
  440. *
  441. * wake_up() has to be called after changing any variable that could
  442. * change the result of the wait condition.
  443. *
  444. * Returns:
  445. * 0 if the @condition evaluated to %false after the @timeout elapsed,
  446. * 1 if the @condition evaluated to %true after the @timeout elapsed,
  447. * the remaining jiffies (at least 1) if the @condition evaluated
  448. * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
  449. * interrupted by a signal.
  450. */
  451. #define wait_event_interruptible_timeout(wq, condition, timeout) \
  452. ({ \
  453. long __ret = timeout; \
  454. might_sleep(); \
  455. if (!___wait_cond_timeout(condition)) \
  456. __ret = __wait_event_interruptible_timeout(wq, \
  457. condition, timeout); \
  458. __ret; \
  459. })
  460. #define __wait_event_hrtimeout(wq, condition, timeout, state) \
  461. ({ \
  462. int __ret = 0; \
  463. struct hrtimer_sleeper __t; \
  464. \
  465. hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
  466. HRTIMER_MODE_REL); \
  467. hrtimer_init_sleeper(&__t, current); \
  468. if ((timeout).tv64 != KTIME_MAX) \
  469. hrtimer_start_range_ns(&__t.timer, timeout, \
  470. current->timer_slack_ns, \
  471. HRTIMER_MODE_REL); \
  472. \
  473. __ret = ___wait_event(wq, condition, state, 0, 0, \
  474. if (!__t.task) { \
  475. __ret = -ETIME; \
  476. break; \
  477. } \
  478. schedule()); \
  479. \
  480. hrtimer_cancel(&__t.timer); \
  481. destroy_hrtimer_on_stack(&__t.timer); \
  482. __ret; \
  483. })
  484. /**
  485. * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
  486. * @wq: the waitqueue to wait on
  487. * @condition: a C expression for the event to wait for
  488. * @timeout: timeout, as a ktime_t
  489. *
  490. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  491. * @condition evaluates to true or a signal is received.
  492. * The @condition is checked each time the waitqueue @wq is woken up.
  493. *
  494. * wake_up() has to be called after changing any variable that could
  495. * change the result of the wait condition.
  496. *
  497. * The function returns 0 if @condition became true, or -ETIME if the timeout
  498. * elapsed.
  499. */
  500. #define wait_event_hrtimeout(wq, condition, timeout) \
  501. ({ \
  502. int __ret = 0; \
  503. might_sleep(); \
  504. if (!(condition)) \
  505. __ret = __wait_event_hrtimeout(wq, condition, timeout, \
  506. TASK_UNINTERRUPTIBLE); \
  507. __ret; \
  508. })
  509. /**
  510. * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
  511. * @wq: the waitqueue to wait on
  512. * @condition: a C expression for the event to wait for
  513. * @timeout: timeout, as a ktime_t
  514. *
  515. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  516. * @condition evaluates to true or a signal is received.
  517. * The @condition is checked each time the waitqueue @wq is woken up.
  518. *
  519. * wake_up() has to be called after changing any variable that could
  520. * change the result of the wait condition.
  521. *
  522. * The function returns 0 if @condition became true, -ERESTARTSYS if it was
  523. * interrupted by a signal, or -ETIME if the timeout elapsed.
  524. */
  525. #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
  526. ({ \
  527. long __ret = 0; \
  528. might_sleep(); \
  529. if (!(condition)) \
  530. __ret = __wait_event_hrtimeout(wq, condition, timeout, \
  531. TASK_INTERRUPTIBLE); \
  532. __ret; \
  533. })
  534. #define __wait_event_interruptible_exclusive(wq, condition) \
  535. ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
  536. schedule())
  537. #define wait_event_interruptible_exclusive(wq, condition) \
  538. ({ \
  539. int __ret = 0; \
  540. might_sleep(); \
  541. if (!(condition)) \
  542. __ret = __wait_event_interruptible_exclusive(wq, condition);\
  543. __ret; \
  544. })
  545. #define __wait_event_freezable_exclusive(wq, condition) \
  546. ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
  547. schedule(); try_to_freeze())
  548. #define wait_event_freezable_exclusive(wq, condition) \
  549. ({ \
  550. int __ret = 0; \
  551. might_sleep(); \
  552. if (!(condition)) \
  553. __ret = __wait_event_freezable_exclusive(wq, condition);\
  554. __ret; \
  555. })
  556. #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
  557. ({ \
  558. int __ret = 0; \
  559. DEFINE_WAIT(__wait); \
  560. if (exclusive) \
  561. __wait.flags |= WQ_FLAG_EXCLUSIVE; \
  562. do { \
  563. if (likely(list_empty(&__wait.task_list))) \
  564. __add_wait_queue_tail(&(wq), &__wait); \
  565. set_current_state(TASK_INTERRUPTIBLE); \
  566. if (signal_pending(current)) { \
  567. __ret = -ERESTARTSYS; \
  568. break; \
  569. } \
  570. if (irq) \
  571. spin_unlock_irq(&(wq).lock); \
  572. else \
  573. spin_unlock(&(wq).lock); \
  574. schedule(); \
  575. if (irq) \
  576. spin_lock_irq(&(wq).lock); \
  577. else \
  578. spin_lock(&(wq).lock); \
  579. } while (!(condition)); \
  580. __remove_wait_queue(&(wq), &__wait); \
  581. __set_current_state(TASK_RUNNING); \
  582. __ret; \
  583. })
  584. /**
  585. * wait_event_interruptible_locked - sleep until a condition gets true
  586. * @wq: the waitqueue to wait on
  587. * @condition: a C expression for the event to wait for
  588. *
  589. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  590. * @condition evaluates to true or a signal is received.
  591. * The @condition is checked each time the waitqueue @wq is woken up.
  592. *
  593. * It must be called with wq.lock being held. This spinlock is
  594. * unlocked while sleeping but @condition testing is done while lock
  595. * is held and when this macro exits the lock is held.
  596. *
  597. * The lock is locked/unlocked using spin_lock()/spin_unlock()
  598. * functions which must match the way they are locked/unlocked outside
  599. * of this macro.
  600. *
  601. * wake_up_locked() has to be called after changing any variable that could
  602. * change the result of the wait condition.
  603. *
  604. * The function will return -ERESTARTSYS if it was interrupted by a
  605. * signal and 0 if @condition evaluated to true.
  606. */
  607. #define wait_event_interruptible_locked(wq, condition) \
  608. ((condition) \
  609. ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
  610. /**
  611. * wait_event_interruptible_locked_irq - sleep until a condition gets true
  612. * @wq: the waitqueue to wait on
  613. * @condition: a C expression for the event to wait for
  614. *
  615. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  616. * @condition evaluates to true or a signal is received.
  617. * The @condition is checked each time the waitqueue @wq is woken up.
  618. *
  619. * It must be called with wq.lock being held. This spinlock is
  620. * unlocked while sleeping but @condition testing is done while lock
  621. * is held and when this macro exits the lock is held.
  622. *
  623. * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
  624. * functions which must match the way they are locked/unlocked outside
  625. * of this macro.
  626. *
  627. * wake_up_locked() has to be called after changing any variable that could
  628. * change the result of the wait condition.
  629. *
  630. * The function will return -ERESTARTSYS if it was interrupted by a
  631. * signal and 0 if @condition evaluated to true.
  632. */
  633. #define wait_event_interruptible_locked_irq(wq, condition) \
  634. ((condition) \
  635. ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
  636. /**
  637. * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
  638. * @wq: the waitqueue to wait on
  639. * @condition: a C expression for the event to wait for
  640. *
  641. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  642. * @condition evaluates to true or a signal is received.
  643. * The @condition is checked each time the waitqueue @wq is woken up.
  644. *
  645. * It must be called with wq.lock being held. This spinlock is
  646. * unlocked while sleeping but @condition testing is done while lock
  647. * is held and when this macro exits the lock is held.
  648. *
  649. * The lock is locked/unlocked using spin_lock()/spin_unlock()
  650. * functions which must match the way they are locked/unlocked outside
  651. * of this macro.
  652. *
  653. * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
  654. * set thus when other process waits process on the list if this
  655. * process is awaken further processes are not considered.
  656. *
  657. * wake_up_locked() has to be called after changing any variable that could
  658. * change the result of the wait condition.
  659. *
  660. * The function will return -ERESTARTSYS if it was interrupted by a
  661. * signal and 0 if @condition evaluated to true.
  662. */
  663. #define wait_event_interruptible_exclusive_locked(wq, condition) \
  664. ((condition) \
  665. ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
  666. /**
  667. * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
  668. * @wq: the waitqueue to wait on
  669. * @condition: a C expression for the event to wait for
  670. *
  671. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  672. * @condition evaluates to true or a signal is received.
  673. * The @condition is checked each time the waitqueue @wq is woken up.
  674. *
  675. * It must be called with wq.lock being held. This spinlock is
  676. * unlocked while sleeping but @condition testing is done while lock
  677. * is held and when this macro exits the lock is held.
  678. *
  679. * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
  680. * functions which must match the way they are locked/unlocked outside
  681. * of this macro.
  682. *
  683. * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
  684. * set thus when other process waits process on the list if this
  685. * process is awaken further processes are not considered.
  686. *
  687. * wake_up_locked() has to be called after changing any variable that could
  688. * change the result of the wait condition.
  689. *
  690. * The function will return -ERESTARTSYS if it was interrupted by a
  691. * signal and 0 if @condition evaluated to true.
  692. */
  693. #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
  694. ((condition) \
  695. ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
/* Sleep in TASK_KILLABLE until @condition is true; returns 0 or -ERESTARTSYS. */
#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received
 * (in TASK_KILLABLE state only fatal signals interrupt the sleep).
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	/* Fast path: skip the waitqueue entirely if already true. */	\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})
/*
 * Uninterruptible wait with @lock (an irq-disabling spinlock) held around
 * the @condition checks; the lock is dropped across @cmd and schedule()
 * and reacquired before re-testing.  Cast to void: no useful return value.
 */
#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))
/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	/* Fast path: nothing to do if the condition already holds. */	\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, ); /* empty cmd */	\
} while (0)
/*
 * Interruptible variant of __wait_event_lock_irq(): evaluates to 0 or
 * -ERESTARTSYS.  @lock is dropped across @cmd and schedule() and
 * reacquired before the @condition is re-tested.
 */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
	/* Only sleep when the condition is not already satisfied. */	\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,); /* empty cmd */ \
	__ret;								\
})
  851. #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
  852. lock, timeout) \
  853. ___wait_event(wq, ___wait_cond_timeout(condition), \
  854. TASK_INTERRUPTIBLE, 0, timeout, \
  855. spin_unlock_irq(&lock); \
  856. __ret = schedule_timeout(__ret); \
  857. spin_lock_irq(&lock));
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	/* ___wait_cond_timeout() also handles an already-expired timeout. */ \
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time.
 * (Implementations live in kernel/sched/wait.c.)
 */
/* Queue/dequeue helpers used by the open-coded wait loops above. */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
/* Wake functions suitable for wait_queue_t::func. */
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/* Declare an on-stack wait queue entry for 'current' with a custom wake function. */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

/* Common case: the entry removes itself from the queue when woken. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
/*
 * Declare an on-stack wait_bit_queue for 'current', keyed on @bit of
 * @word, for use with the hashed bit waitqueues.
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
/* (Re)initialize a caller-provided wait_queue_t for 'current' (auto-remove on wake). */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
/*
 * Stock 'action' callbacks accepted by wait_on_bit_action() and friends;
 * presumably plain-schedule, io-schedule and timeout variants -- see their
 * definitions in kernel/sched/wait.c.
 */
extern int bit_wait(struct wait_bit_key *, int);
extern int bit_wait_io(struct wait_bit_key *, int);
extern int bit_wait_timeout(struct wait_bit_key *, int);
extern int bit_wait_io_timeout(struct wait_bit_key *, int);
  931. /**
  932. * wait_on_bit - wait for a bit to be cleared
  933. * @word: the word being waited on, a kernel virtual address
  934. * @bit: the bit of the word being waited on
  935. * @mode: the task state to sleep in
  936. *
  937. * There is a standard hashed waitqueue table for generic use. This
  938. * is the part of the hashtable's accessor API that waits on a bit.
  939. * For instance, if one were to have waiters on a bitflag, one would
  940. * call wait_on_bit() in threads waiting for the bit to clear.
  941. * One uses wait_on_bit() where one is waiting for the bit to clear,
  942. * but has no intention of setting it.
  943. * Returned value will be zero if the bit was cleared, or non-zero
  944. * if the process received a signal and the mode permitted wakeup
  945. * on that signal.
  946. */
  947. static inline int
  948. wait_on_bit(unsigned long *word, int bit, unsigned mode)
  949. {
  950. might_sleep();
  951. if (!test_bit(bit, word))
  952. return 0;
  953. return out_of_line_wait_on_bit(word, bit,
  954. bit_wait,
  955. mode);
  956. }
  957. /**
  958. * wait_on_bit_io - wait for a bit to be cleared
  959. * @word: the word being waited on, a kernel virtual address
  960. * @bit: the bit of the word being waited on
  961. * @mode: the task state to sleep in
  962. *
  963. * Use the standard hashed waitqueue table to wait for a bit
  964. * to be cleared. This is similar to wait_on_bit(), but calls
  965. * io_schedule() instead of schedule() for the actual waiting.
  966. *
  967. * Returned value will be zero if the bit was cleared, or non-zero
  968. * if the process received a signal and the mode permitted wakeup
  969. * on that signal.
  970. */
  971. static inline int
  972. wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
  973. {
  974. might_sleep();
  975. if (!test_bit(bit, word))
  976. return 0;
  977. return out_of_line_wait_on_bit(word, bit,
  978. bit_wait_io,
  979. mode);
  980. }
  981. /**
  982. * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
  983. * @word: the word being waited on, a kernel virtual address
  984. * @bit: the bit of the word being waited on
  985. * @mode: the task state to sleep in
  986. * @timeout: timeout, in jiffies
  987. *
  988. * Use the standard hashed waitqueue table to wait for a bit
  989. * to be cleared. This is similar to wait_on_bit(), except also takes a
  990. * timeout parameter.
  991. *
  992. * Returned value will be zero if the bit was cleared before the
  993. * @timeout elapsed, or non-zero if the @timeout elapsed or process
  994. * received a signal and the mode permitted wakeup on that signal.
  995. */
  996. static inline int
  997. wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
  998. unsigned long timeout)
  999. {
  1000. might_sleep();
  1001. if (!test_bit(bit, word))
  1002. return 0;
  1003. return out_of_line_wait_on_bit_timeout(word, bit,
  1004. bit_wait_timeout,
  1005. mode, timeout);
  1006. }
  1007. /**
  1008. * wait_on_bit_action - wait for a bit to be cleared
  1009. * @word: the word being waited on, a kernel virtual address
  1010. * @bit: the bit of the word being waited on
  1011. * @action: the function used to sleep, which may take special actions
  1012. * @mode: the task state to sleep in
  1013. *
  1014. * Use the standard hashed waitqueue table to wait for a bit
  1015. * to be cleared, and allow the waiting action to be specified.
  1016. * This is like wait_on_bit() but allows fine control of how the waiting
  1017. * is done.
  1018. *
  1019. * Returned value will be zero if the bit was cleared, or non-zero
  1020. * if the process received a signal and the mode permitted wakeup
  1021. * on that signal.
  1022. */
  1023. static inline int
  1024. wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
  1025. unsigned mode)
  1026. {
  1027. might_sleep();
  1028. if (!test_bit(bit, word))
  1029. return 0;
  1030. return out_of_line_wait_on_bit(word, bit, action, mode);
  1031. }
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	/* Fast path: atomically grab the bit if it was clear. */
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}
  1059. /**
  1060. * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
  1061. * @word: the word being waited on, a kernel virtual address
  1062. * @bit: the bit of the word being waited on
  1063. * @mode: the task state to sleep in
  1064. *
  1065. * Use the standard hashed waitqueue table to wait for a bit
  1066. * to be cleared and then to atomically set it. This is similar
  1067. * to wait_on_bit(), but calls io_schedule() instead of schedule()
  1068. * for the actual waiting.
  1069. *
  1070. * Returns zero if the bit was (eventually) found to be clear and was
  1071. * set. Returns non-zero if a signal was delivered to the process and
  1072. * the @mode allows that signal to wake the process.
  1073. */
  1074. static inline int
  1075. wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
  1076. {
  1077. might_sleep();
  1078. if (!test_and_set_bit(bit, word))
  1079. return 0;
  1080. return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
  1081. }
  1082. /**
  1083. * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
  1084. * @word: the word being waited on, a kernel virtual address
  1085. * @bit: the bit of the word being waited on
  1086. * @action: the function used to sleep, which may take special actions
  1087. * @mode: the task state to sleep in
  1088. *
  1089. * Use the standard hashed waitqueue table to wait for a bit
  1090. * to be cleared and then to set it, and allow the waiting action
  1091. * to be specified.
  1092. * This is like wait_on_bit() but allows fine control of how the waiting
  1093. * is done.
  1094. *
  1095. * Returns zero if the bit was (eventually) found to be clear and was
  1096. * set. Returns non-zero if a signal was delivered to the process and
  1097. * the @mode allows that signal to wake the process.
  1098. */
  1099. static inline int
  1100. wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
  1101. unsigned mode)
  1102. {
  1103. might_sleep();
  1104. if (!test_and_set_bit(bit, word))
  1105. return 0;
  1106. return out_of_line_wait_on_bit_lock(word, bit, action, mode);
  1107. }
  1108. /**
  1109. * wait_on_atomic_t - Wait for an atomic_t to become 0
  1110. * @val: The atomic value being waited on, a kernel virtual address
  1111. * @action: the function used to sleep, which may take special actions
  1112. * @mode: the task state to sleep in
  1113. *
  1114. * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
  1115. * the purpose of getting a waitqueue, but we set the key to a bit number
  1116. * outside of the target 'word'.
  1117. */
  1118. static inline
  1119. int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
  1120. {
  1121. might_sleep();
  1122. if (atomic_read(val) == 0)
  1123. return 0;
  1124. return out_of_line_wait_on_atomic_t(val, action, mode);
  1125. }
  1126. #endif /* _LINUX_WAIT_H */