wait.h 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158
  1. #ifndef _LINUX_WAIT_H
  2. #define _LINUX_WAIT_H
  3. /*
  4. * Linux wait queue related types and methods
  5. */
  6. #include <linux/list.h>
  7. #include <linux/stddef.h>
  8. #include <linux/spinlock.h>
  9. #include <asm/current.h>
  10. #include <uapi/linux/wait.h>
/*
 * A single waiter parked on a wait queue head.
 */
typedef struct __wait_queue wait_queue_t;
/*
 * Wakeup callback invoked for each queued entry by the __wake_up*()
 * functions; @key is the opaque value passed through from the waker.
 */
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* wake-one semantics, see __add_wait_queue_exclusive() */
#define WQ_FLAG_WOKEN		0x02

struct __wait_queue {
	unsigned int		flags;		/* WQ_FLAG_* bits */
	void			*private;	/* the waiting task (see init_waitqueue_entry()),
						 * or callback-specific data when a custom func is used */
	wait_queue_func_t	func;		/* wakeup callback */
	struct list_head	task_list;	/* link on __wait_queue_head::task_list */
};

/* Identifies which bit of which word a bit-waiter is sleeping on. */
struct wait_bit_key {
	void			*flags;		/* word containing the awaited bit */
	int			bit_nr;		/* bit number within *flags */
#define WAIT_ATOMIC_T_BIT_NR -1			/* sentinel: waiting on an atomic_t, not a bit */
	unsigned long		timeout;	/* used by the timed bit-wait variants */
};

/* A bit-waiter: key plus the embedded wait queue entry. */
struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

/* Head of a wait queue: the lock protects the list of waiters. */
struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;
/*
 * Macros for declaration and initialisaton of the datatypes
 */

/*
 * Static initializer for a wait queue entry bound to task @tsk.
 * task_list is left { NULL, NULL }; the list linkage is established
 * when the entry is actually queued (add_wait_queue() and friends).
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a wait queue head: unlocked, empty list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

/* Static initializers for the bit-wait keys. */
#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

/*
 * Run-time initialisation of a wait queue head.  The static
 * lock_class_key gives every init_waitqueue_head() call site its own
 * lockdep class, and #q provides the lock name for debugging.
 */
#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
/*
 * With lockdep enabled, on-stack heads must be initialised at run time
 * so they get a proper lock class; the statement expression yields the
 * (now initialised) head so it can be used as an initializer.
 */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
/* Without lockdep the plain static initializer is sufficient. */
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
  72. static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
  73. {
  74. q->flags = 0;
  75. q->private = p;
  76. q->func = default_wake_function;
  77. }
  78. static inline void
  79. init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
  80. {
  81. q->flags = 0;
  82. q->private = NULL;
  83. q->func = func;
  84. }
/*
 * waitqueue_active - are there any waiters queued on @q?
 *
 * NOTE(review): this peeks at the list without taking q->lock, so the
 * answer can be stale by the time the caller acts on it; callers are
 * presumably responsible for their own memory ordering against the
 * waiter's condition check — confirm at each call site.
 */
static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

/* Out-of-line add/remove (unlike the __-prefixed inline helpers below,
 * these are the lock-taking entry points — see their definitions). */
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
  92. static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
  93. {
  94. list_add(&new->task_list, &head->task_list);
  95. }
  96. /*
  97. * Used for wake-one threads:
  98. */
  99. static inline void
  100. __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
  101. {
  102. wait->flags |= WQ_FLAG_EXCLUSIVE;
  103. __add_wait_queue(q, wait);
  104. }
  105. static inline void __add_wait_queue_tail(wait_queue_head_t *head,
  106. wait_queue_t *new)
  107. {
  108. list_add_tail(&new->task_list, &head->task_list);
  109. }
  110. static inline void
  111. __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
  112. {
  113. wait->flags |= WQ_FLAG_EXCLUSIVE;
  114. __add_wait_queue_tail(q, wait);
  115. }
  116. static inline void
  117. __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
  118. {
  119. list_del(&old->task_list);
  120. }
/* Action run by the bit-wait loops while the bit is still set
 * (typically it schedules); non-zero return aborts the wait. */
typedef int wait_bit_action_f(struct wait_bit_key *);

/* Out-of-line wakeup primitives; @nr is the number of exclusive
 * waiters to wake, @key is passed through to each entry's callback. */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);

/* Bit-wait machinery (used via the wait_on_bit* wrappers). */
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
/* Map a word/bit pair to its (shared, hashed) wait queue head. */
wait_queue_head_t *bit_waitqueue(void *, int);
/*
 * Wakeup front-ends.  The third argument of __wake_up() is the number
 * of exclusive waiters to wake; the _all variants pass 0 (no limit),
 * the plain variants wake a single exclusive waiter.  _interruptible
 * variants only wake TASK_INTERRUPTIBLE sleepers, the others use
 * TASK_NORMAL.  _locked variants expect the caller to already hold
 * the wait queue head's lock.
 */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 * The poll mask @m is smuggled through the @key argument and ends up
 * in each waiter's wakeup callback.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
/*
 * Condition wrapper for the *_timeout variants.  Relies on a variable
 * named __ret (the remaining timeout in jiffies) being in scope at the
 * expansion site.  If the condition turns true exactly when the
 * timeout has run out, __ret is forced to 1 so the caller still
 * reports success rather than a timeout.
 */
#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})
  164. #define ___wait_is_interruptible(state) \
  165. (!__builtin_constant_p(state) || \
  166. state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 *
 * This is the common skeleton: queue ourselves (@exclusive selects
 * wake-one behaviour), then loop {set state, re-check @condition,
 * run @cmd (usually some flavour of schedule())} until the condition
 * holds or — for interruptible/killable @state — a signal arrives,
 * in which case the signal code from prepare_to_wait_event() is
 * returned in __ret.
 */
#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;	/* explicit shadow */			\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			if (exclusive) {				\
				/* must not swallow an exclusive	\
				 * wakeup: abort passes it on */	\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})
#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)	/* fast path: no queueing when already true */	\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

#define __io_wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 * (the sleep is accounted as I/O wait).
 */
#define io_wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__io_wait_event(wq, condition);					\
} while (0)
#define __wait_event_freezable(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable(wq, condition);		\
	__ret;								\
})
#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})

#define __wait_event_freezable_timeout(wq, condition, timeout)		\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.  Same return values as
 * wait_event_timeout().
 */
#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_freezable_timeout(wq, condition, timeout); \
	__ret;								\
})
#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * NOTE(review): unlike wait_event(), there is no might_sleep() here --
 * confirm whether that is intentional (callers may hold locks that
 * @cmd1 drops).
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)
#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})
#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})
/*
 * High-resolution-timer backed wait: arm an on-stack hrtimer sleeper
 * (unless @timeout is KTIME_MAX, i.e. wait forever), then run the
 * common wait loop; the sleeper clears __t.task on expiry, which the
 * loop body turns into -ETIME.
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true; signals cannot interrupt this variant.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})
#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

/*
 * Like wait_event_interruptible(), but queues as an exclusive
 * (wake-one) waiter.  Returns 0 when @condition became true,
 * -ERESTARTSYS when interrupted by a signal.
 */
#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})

#define __wait_event_freezable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule(); try_to_freeze())

/*
 * Exclusive-waiter variant of wait_event_freezable(); same return
 * values as wait_event_interruptible_exclusive().
 */
#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable_exclusive(wq, condition);\
	__ret;								\
})
/*
 * Common body for the wait_event_interruptible*locked* macros.
 * Entered and exited with (wq).lock held; the lock is dropped only
 * around schedule(), using the irq or plain spin_lock flavour as
 * selected by @irq.  @exclusive queues the waiter as wake-one.
 * Note it uses the bare __add/__remove helpers, which is safe exactly
 * because the lock is held here.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		/* re-queue only if a wakeup removed us from the list */\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is queued with the WQ_FLAG_EXCLUSIVE flag set, so an
 * exclusive wakeup stops after waking this process; waiters further
 * down the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is queued with the WQ_FLAG_EXCLUSIVE flag set, so an
 * exclusive wakeup stops after waking this process; waiters further
 * down the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})
/*
 * Wait while holding @lock: the lock is dropped before @cmd and
 * schedule() and re-taken before @condition is re-checked.
 */
#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	/* empty trailing argument: no extra cmd before schedule() */	\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)
/*
 * Helper for wait_event_interruptible_lock_irq{,_cmd}(): like
 * __wait_event_lock_irq() but sleeps in TASK_INTERRUPTIBLE and yields
 * the ___wait_event() result (0, or -ERESTARTSYS on signal, per the
 * callers' kernel-doc).
 */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
	/* fast path: skip the waitqueue if @condition already holds */	\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		/* empty trailing argument: no cmd before sleeping */	\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})
  789. #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
  790. lock, timeout) \
  791. ___wait_event(wq, ___wait_cond_timeout(condition), \
  792. TASK_INTERRUPTIBLE, 0, timeout, \
  793. spin_unlock_irq(&lock); \
  794. __ret = schedule_timeout(__ret); \
  795. spin_lock_irq(&lock));
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	/* long, not int: carries the remaining-jiffies result */	\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
/* wake-up callbacks installed in wait_queue_t::func (see DEFINE_WAIT*) */
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/*
 * Declare an on-stack wait_queue_t for the current task, woken through
 * @function.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

/* Common case: wake via autoremove_wake_function */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
/*
 * Declare an on-stack wait_bit_queue for the current task, keyed on
 * @word/@bit and woken through wake_bit_function.
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
/*
 * Runtime (re)initialization of an existing wait_queue_t to the same
 * state DEFINE_WAIT() produces, for entries that cannot be declared
 * with the initializer macros.
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
/* Stock @action routines for the wait_on_bit*() interfaces below */
extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);
extern int bit_wait_timeout(struct wait_bit_key *);
extern int bit_wait_io_timeout(struct wait_bit_key *);
  869. /**
  870. * wait_on_bit - wait for a bit to be cleared
  871. * @word: the word being waited on, a kernel virtual address
  872. * @bit: the bit of the word being waited on
  873. * @mode: the task state to sleep in
  874. *
  875. * There is a standard hashed waitqueue table for generic use. This
  876. * is the part of the hashtable's accessor API that waits on a bit.
  877. * For instance, if one were to have waiters on a bitflag, one would
  878. * call wait_on_bit() in threads waiting for the bit to clear.
  879. * One uses wait_on_bit() where one is waiting for the bit to clear,
  880. * but has no intention of setting it.
  881. * Returned value will be zero if the bit was cleared, or non-zero
  882. * if the process received a signal and the mode permitted wakeup
  883. * on that signal.
  884. */
  885. static inline int
  886. wait_on_bit(unsigned long *word, int bit, unsigned mode)
  887. {
  888. might_sleep();
  889. if (!test_bit(bit, word))
  890. return 0;
  891. return out_of_line_wait_on_bit(word, bit,
  892. bit_wait,
  893. mode);
  894. }
  895. /**
  896. * wait_on_bit_io - wait for a bit to be cleared
  897. * @word: the word being waited on, a kernel virtual address
  898. * @bit: the bit of the word being waited on
  899. * @mode: the task state to sleep in
  900. *
  901. * Use the standard hashed waitqueue table to wait for a bit
  902. * to be cleared. This is similar to wait_on_bit(), but calls
  903. * io_schedule() instead of schedule() for the actual waiting.
  904. *
  905. * Returned value will be zero if the bit was cleared, or non-zero
  906. * if the process received a signal and the mode permitted wakeup
  907. * on that signal.
  908. */
  909. static inline int
  910. wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
  911. {
  912. might_sleep();
  913. if (!test_bit(bit, word))
  914. return 0;
  915. return out_of_line_wait_on_bit(word, bit,
  916. bit_wait_io,
  917. mode);
  918. }
  919. /**
  920. * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
  921. * @word: the word being waited on, a kernel virtual address
  922. * @bit: the bit of the word being waited on
  923. * @mode: the task state to sleep in
  924. * @timeout: timeout, in jiffies
  925. *
  926. * Use the standard hashed waitqueue table to wait for a bit
  927. * to be cleared. This is similar to wait_on_bit(), except also takes a
  928. * timeout parameter.
  929. *
  930. * Returned value will be zero if the bit was cleared before the
  931. * @timeout elapsed, or non-zero if the @timeout elapsed or process
  932. * received a signal and the mode permitted wakeup on that signal.
  933. */
  934. static inline int
  935. wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
  936. unsigned long timeout)
  937. {
  938. might_sleep();
  939. if (!test_bit(bit, word))
  940. return 0;
  941. return out_of_line_wait_on_bit_timeout(word, bit,
  942. bit_wait_timeout,
  943. mode, timeout);
  944. }
  945. /**
  946. * wait_on_bit_action - wait for a bit to be cleared
  947. * @word: the word being waited on, a kernel virtual address
  948. * @bit: the bit of the word being waited on
  949. * @action: the function used to sleep, which may take special actions
  950. * @mode: the task state to sleep in
  951. *
  952. * Use the standard hashed waitqueue table to wait for a bit
  953. * to be cleared, and allow the waiting action to be specified.
  954. * This is like wait_on_bit() but allows fine control of how the waiting
  955. * is done.
  956. *
  957. * Returned value will be zero if the bit was cleared, or non-zero
  958. * if the process received a signal and the mode permitted wakeup
  959. * on that signal.
  960. */
  961. static inline int
  962. wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
  963. unsigned mode)
  964. {
  965. might_sleep();
  966. if (!test_bit(bit, word))
  967. return 0;
  968. return out_of_line_wait_on_bit(word, bit, action, mode);
  969. }
  970. /**
  971. * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
  972. * @word: the word being waited on, a kernel virtual address
  973. * @bit: the bit of the word being waited on
  974. * @mode: the task state to sleep in
  975. *
  976. * There is a standard hashed waitqueue table for generic use. This
  977. * is the part of the hashtable's accessor API that waits on a bit
  978. * when one intends to set it, for instance, trying to lock bitflags.
  979. * For instance, if one were to have waiters trying to set bitflag
  980. * and waiting for it to clear before setting it, one would call
  981. * wait_on_bit() in threads waiting to be able to set the bit.
  982. * One uses wait_on_bit_lock() where one is waiting for the bit to
  983. * clear with the intention of setting it, and when done, clearing it.
  984. *
  985. * Returns zero if the bit was (eventually) found to be clear and was
  986. * set. Returns non-zero if a signal was delivered to the process and
  987. * the @mode allows that signal to wake the process.
  988. */
  989. static inline int
  990. wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
  991. {
  992. might_sleep();
  993. if (!test_and_set_bit(bit, word))
  994. return 0;
  995. return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
  996. }
  997. /**
  998. * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
  999. * @word: the word being waited on, a kernel virtual address
  1000. * @bit: the bit of the word being waited on
  1001. * @mode: the task state to sleep in
  1002. *
  1003. * Use the standard hashed waitqueue table to wait for a bit
  1004. * to be cleared and then to atomically set it. This is similar
  1005. * to wait_on_bit(), but calls io_schedule() instead of schedule()
  1006. * for the actual waiting.
  1007. *
  1008. * Returns zero if the bit was (eventually) found to be clear and was
  1009. * set. Returns non-zero if a signal was delivered to the process and
  1010. * the @mode allows that signal to wake the process.
  1011. */
  1012. static inline int
  1013. wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
  1014. {
  1015. might_sleep();
  1016. if (!test_and_set_bit(bit, word))
  1017. return 0;
  1018. return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
  1019. }
  1020. /**
  1021. * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
  1022. * @word: the word being waited on, a kernel virtual address
  1023. * @bit: the bit of the word being waited on
  1024. * @action: the function used to sleep, which may take special actions
  1025. * @mode: the task state to sleep in
  1026. *
  1027. * Use the standard hashed waitqueue table to wait for a bit
  1028. * to be cleared and then to set it, and allow the waiting action
  1029. * to be specified.
  1030. * This is like wait_on_bit() but allows fine control of how the waiting
  1031. * is done.
  1032. *
  1033. * Returns zero if the bit was (eventually) found to be clear and was
  1034. * set. Returns non-zero if a signal was delivered to the process and
  1035. * the @mode allows that signal to wake the process.
  1036. */
  1037. static inline int
  1038. wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
  1039. unsigned mode)
  1040. {
  1041. might_sleep();
  1042. if (!test_and_set_bit(bit, word))
  1043. return 0;
  1044. return out_of_line_wait_on_bit_lock(word, bit, action, mode);
  1045. }
  1046. /**
  1047. * wait_on_atomic_t - Wait for an atomic_t to become 0
  1048. * @val: The atomic value being waited on, a kernel virtual address
  1049. * @action: the function used to sleep, which may take special actions
  1050. * @mode: the task state to sleep in
  1051. *
  1052. * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
  1053. * the purpose of getting a waitqueue, but we set the key to a bit number
  1054. * outside of the target 'word'.
  1055. */
  1056. static inline
  1057. int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
  1058. {
  1059. might_sleep();
  1060. if (atomic_read(val) == 0)
  1061. return 0;
  1062. return out_of_line_wait_on_atomic_t(val, action, mode);
  1063. }
  1064. #endif /* _LINUX_WAIT_H */