/* include/linux/wait.h — web-viewer page header and line-number residue removed */
  1. #ifndef _LINUX_WAIT_H
  2. #define _LINUX_WAIT_H
  3. /*
  4. * Linux wait queue related types and methods
  5. */
  6. #include <linux/list.h>
  7. #include <linux/stddef.h>
  8. #include <linux/spinlock.h>
  9. #include <asm/current.h>
  10. #include <uapi/linux/wait.h>
  11. typedef struct __wait_queue wait_queue_t;
  12. typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
  13. int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
  14. /* __wait_queue::flags */
  15. #define WQ_FLAG_EXCLUSIVE 0x01
  16. #define WQ_FLAG_WOKEN 0x02
  17. struct __wait_queue {
  18. unsigned int flags;
  19. void *private;
  20. wait_queue_func_t func;
  21. struct list_head task_list;
  22. };
  23. struct wait_bit_key {
  24. void *flags;
  25. int bit_nr;
  26. #define WAIT_ATOMIC_T_BIT_NR -1
  27. unsigned long timeout;
  28. };
  29. struct wait_bit_queue {
  30. struct wait_bit_key key;
  31. wait_queue_t wait;
  32. };
  33. struct __wait_queue_head {
  34. spinlock_t lock;
  35. struct list_head task_list;
  36. };
  37. typedef struct __wait_queue_head wait_queue_head_t;
  38. struct task_struct;
  39. /*
  40. * Macros for declaration and initialisaton of the datatypes
  41. */
  42. #define __WAITQUEUE_INITIALIZER(name, tsk) { \
  43. .private = tsk, \
  44. .func = default_wake_function, \
  45. .task_list = { NULL, NULL } }
  46. #define DECLARE_WAITQUEUE(name, tsk) \
  47. wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
  48. #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
  49. .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
  50. .task_list = { &(name).task_list, &(name).task_list } }
  51. #define DECLARE_WAIT_QUEUE_HEAD(name) \
  52. wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
  53. #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
  54. { .flags = word, .bit_nr = bit, }
  55. #define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
  56. { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
  57. extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
  58. #define init_waitqueue_head(q) \
  59. do { \
  60. static struct lock_class_key __key; \
  61. \
  62. __init_waitqueue_head((q), #q, &__key); \
  63. } while (0)
  64. #ifdef CONFIG_LOCKDEP
  65. # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
  66. ({ init_waitqueue_head(&name); name; })
  67. # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
  68. wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
  69. #else
  70. # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
  71. #endif
  72. static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
  73. {
  74. q->flags = 0;
  75. q->private = p;
  76. q->func = default_wake_function;
  77. }
  78. static inline void
  79. init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
  80. {
  81. q->flags = 0;
  82. q->private = NULL;
  83. q->func = func;
  84. }
  85. static inline int waitqueue_active(wait_queue_head_t *q)
  86. {
  87. return !list_empty(&q->task_list);
  88. }
  89. extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
  90. extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
  91. extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
  92. static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
  93. {
  94. list_add(&new->task_list, &head->task_list);
  95. }
  96. /*
  97. * Used for wake-one threads:
  98. */
  99. static inline void
  100. __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
  101. {
  102. wait->flags |= WQ_FLAG_EXCLUSIVE;
  103. __add_wait_queue(q, wait);
  104. }
  105. static inline void __add_wait_queue_tail(wait_queue_head_t *head,
  106. wait_queue_t *new)
  107. {
  108. list_add_tail(&new->task_list, &head->task_list);
  109. }
  110. static inline void
  111. __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
  112. {
  113. wait->flags |= WQ_FLAG_EXCLUSIVE;
  114. __add_wait_queue_tail(q, wait);
  115. }
  116. static inline void
  117. __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
  118. {
  119. list_del(&old->task_list);
  120. }
  121. typedef int wait_bit_action_f(struct wait_bit_key *);
  122. void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
  123. void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
  124. void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
  125. void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
  126. void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
  127. void __wake_up_bit(wait_queue_head_t *, void *, int);
  128. int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
  129. int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
  130. void wake_up_bit(void *, int);
  131. void wake_up_atomic_t(atomic_t *);
  132. int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
  133. int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
  134. int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
  135. int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
  136. wait_queue_head_t *bit_waitqueue(void *, int);
  137. #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
  138. #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
  139. #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
  140. #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
  141. #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
  142. #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
  143. #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
  144. #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
  145. #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
  146. /*
  147. * Wakeup macros to be used to report events to the targets.
  148. */
  149. #define wake_up_poll(x, m) \
  150. __wake_up(x, TASK_NORMAL, 1, (void *) (m))
  151. #define wake_up_locked_poll(x, m) \
  152. __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
  153. #define wake_up_interruptible_poll(x, m) \
  154. __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
  155. #define wake_up_interruptible_sync_poll(x, m) \
  156. __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
  157. #define ___wait_cond_timeout(condition) \
  158. ({ \
  159. bool __cond = (condition); \
  160. if (__cond && !__ret) \
  161. __ret = 1; \
  162. __cond || !__ret; \
  163. })
  164. #define ___wait_is_interruptible(state) \
  165. (!__builtin_constant_p(state) || \
  166. state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
  167. /*
  168. * The below macro ___wait_event() has an explicit shadow of the __ret
  169. * variable when used from the wait_event_*() macros.
  170. *
  171. * This is so that both can use the ___wait_cond_timeout() construct
  172. * to wrap the condition.
  173. *
  174. * The type inconsistency of the wait_event_*() __ret variable is also
  175. * on purpose; we use long where we can return timeout values and int
  176. * otherwise.
  177. */
  178. #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
  179. ({ \
  180. __label__ __out; \
  181. wait_queue_t __wait; \
  182. long __ret = ret; /* explicit shadow */ \
  183. \
  184. INIT_LIST_HEAD(&__wait.task_list); \
  185. if (exclusive) \
  186. __wait.flags = WQ_FLAG_EXCLUSIVE; \
  187. else \
  188. __wait.flags = 0; \
  189. \
  190. for (;;) { \
  191. long __int = prepare_to_wait_event(&wq, &__wait, state);\
  192. \
  193. if (condition) \
  194. break; \
  195. \
  196. if (___wait_is_interruptible(state) && __int) { \
  197. __ret = __int; \
  198. if (exclusive) { \
  199. abort_exclusive_wait(&wq, &__wait, \
  200. state, NULL); \
  201. goto __out; \
  202. } \
  203. break; \
  204. } \
  205. \
  206. cmd; \
  207. } \
  208. finish_wait(&wq, &__wait); \
  209. __out: __ret; \
  210. })
  211. #define __wait_event(wq, condition) \
  212. (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  213. schedule())
  214. /**
  215. * wait_event - sleep until a condition gets true
  216. * @wq: the waitqueue to wait on
  217. * @condition: a C expression for the event to wait for
  218. *
  219. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  220. * @condition evaluates to true. The @condition is checked each time
  221. * the waitqueue @wq is woken up.
  222. *
  223. * wake_up() has to be called after changing any variable that could
  224. * change the result of the wait condition.
  225. */
  226. #define wait_event(wq, condition) \
  227. do { \
  228. might_sleep(); \
  229. if (condition) \
  230. break; \
  231. __wait_event(wq, condition); \
  232. } while (0)
  233. #define __io_wait_event(wq, condition) \
  234. (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  235. io_schedule())
  236. /*
  237. * io_wait_event() -- like wait_event() but with io_schedule()
  238. */
  239. #define io_wait_event(wq, condition) \
  240. do { \
  241. might_sleep(); \
  242. if (condition) \
  243. break; \
  244. __io_wait_event(wq, condition); \
  245. } while (0)
  246. #define __wait_event_freezable(wq, condition) \
  247. ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
  248. schedule(); try_to_freeze())
  249. /**
  250. * wait_event - sleep (or freeze) until a condition gets true
  251. * @wq: the waitqueue to wait on
  252. * @condition: a C expression for the event to wait for
  253. *
  254. * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
  255. * to system load) until the @condition evaluates to true. The
  256. * @condition is checked each time the waitqueue @wq is woken up.
  257. *
  258. * wake_up() has to be called after changing any variable that could
  259. * change the result of the wait condition.
  260. */
  261. #define wait_event_freezable(wq, condition) \
  262. ({ \
  263. int __ret = 0; \
  264. might_sleep(); \
  265. if (!(condition)) \
  266. __ret = __wait_event_freezable(wq, condition); \
  267. __ret; \
  268. })
  269. #define __wait_event_timeout(wq, condition, timeout) \
  270. ___wait_event(wq, ___wait_cond_timeout(condition), \
  271. TASK_UNINTERRUPTIBLE, 0, timeout, \
  272. __ret = schedule_timeout(__ret))
  273. /**
  274. * wait_event_timeout - sleep until a condition gets true or a timeout elapses
  275. * @wq: the waitqueue to wait on
  276. * @condition: a C expression for the event to wait for
  277. * @timeout: timeout, in jiffies
  278. *
  279. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  280. * @condition evaluates to true. The @condition is checked each time
  281. * the waitqueue @wq is woken up.
  282. *
  283. * wake_up() has to be called after changing any variable that could
  284. * change the result of the wait condition.
  285. *
  286. * Returns:
  287. * 0 if the @condition evaluated to %false after the @timeout elapsed,
  288. * 1 if the @condition evaluated to %true after the @timeout elapsed,
  289. * or the remaining jiffies (at least 1) if the @condition evaluated
  290. * to %true before the @timeout elapsed.
  291. */
  292. #define wait_event_timeout(wq, condition, timeout) \
  293. ({ \
  294. long __ret = timeout; \
  295. might_sleep(); \
  296. if (!___wait_cond_timeout(condition)) \
  297. __ret = __wait_event_timeout(wq, condition, timeout); \
  298. __ret; \
  299. })
  300. #define __wait_event_freezable_timeout(wq, condition, timeout) \
  301. ___wait_event(wq, ___wait_cond_timeout(condition), \
  302. TASK_INTERRUPTIBLE, 0, timeout, \
  303. __ret = schedule_timeout(__ret); try_to_freeze())
  304. /*
  305. * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
  306. * increasing load and is freezable.
  307. */
  308. #define wait_event_freezable_timeout(wq, condition, timeout) \
  309. ({ \
  310. long __ret = timeout; \
  311. might_sleep(); \
  312. if (!___wait_cond_timeout(condition)) \
  313. __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
  314. __ret; \
  315. })
  316. #define __wait_event_cmd(wq, condition, cmd1, cmd2) \
  317. (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  318. cmd1; schedule(); cmd2)
  319. /**
  320. * wait_event_cmd - sleep until a condition gets true
  321. * @wq: the waitqueue to wait on
  322. * @condition: a C expression for the event to wait for
  323. * @cmd1: the command will be executed before sleep
  324. * @cmd2: the command will be executed after sleep
  325. *
  326. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  327. * @condition evaluates to true. The @condition is checked each time
  328. * the waitqueue @wq is woken up.
  329. *
  330. * wake_up() has to be called after changing any variable that could
  331. * change the result of the wait condition.
  332. */
  333. #define wait_event_cmd(wq, condition, cmd1, cmd2) \
  334. do { \
  335. might_sleep(); \
  336. if (condition) \
  337. break; \
  338. __wait_event_cmd(wq, condition, cmd1, cmd2); \
  339. } while (0)
  340. #define __wait_event_interruptible(wq, condition) \
  341. ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
  342. schedule())
  343. /**
  344. * wait_event_interruptible - sleep until a condition gets true
  345. * @wq: the waitqueue to wait on
  346. * @condition: a C expression for the event to wait for
  347. *
  348. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  349. * @condition evaluates to true or a signal is received.
  350. * The @condition is checked each time the waitqueue @wq is woken up.
  351. *
  352. * wake_up() has to be called after changing any variable that could
  353. * change the result of the wait condition.
  354. *
  355. * The function will return -ERESTARTSYS if it was interrupted by a
  356. * signal and 0 if @condition evaluated to true.
  357. */
  358. #define wait_event_interruptible(wq, condition) \
  359. ({ \
  360. int __ret = 0; \
  361. might_sleep(); \
  362. if (!(condition)) \
  363. __ret = __wait_event_interruptible(wq, condition); \
  364. __ret; \
  365. })
  366. #define __wait_event_interruptible_timeout(wq, condition, timeout) \
  367. ___wait_event(wq, ___wait_cond_timeout(condition), \
  368. TASK_INTERRUPTIBLE, 0, timeout, \
  369. __ret = schedule_timeout(__ret))
  370. /**
  371. * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
  372. * @wq: the waitqueue to wait on
  373. * @condition: a C expression for the event to wait for
  374. * @timeout: timeout, in jiffies
  375. *
  376. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  377. * @condition evaluates to true or a signal is received.
  378. * The @condition is checked each time the waitqueue @wq is woken up.
  379. *
  380. * wake_up() has to be called after changing any variable that could
  381. * change the result of the wait condition.
  382. *
  383. * Returns:
  384. * 0 if the @condition evaluated to %false after the @timeout elapsed,
  385. * 1 if the @condition evaluated to %true after the @timeout elapsed,
  386. * the remaining jiffies (at least 1) if the @condition evaluated
  387. * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
  388. * interrupted by a signal.
  389. */
  390. #define wait_event_interruptible_timeout(wq, condition, timeout) \
  391. ({ \
  392. long __ret = timeout; \
  393. might_sleep(); \
  394. if (!___wait_cond_timeout(condition)) \
  395. __ret = __wait_event_interruptible_timeout(wq, \
  396. condition, timeout); \
  397. __ret; \
  398. })
  399. #define __wait_event_hrtimeout(wq, condition, timeout, state) \
  400. ({ \
  401. int __ret = 0; \
  402. struct hrtimer_sleeper __t; \
  403. \
  404. hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
  405. HRTIMER_MODE_REL); \
  406. hrtimer_init_sleeper(&__t, current); \
  407. if ((timeout).tv64 != KTIME_MAX) \
  408. hrtimer_start_range_ns(&__t.timer, timeout, \
  409. current->timer_slack_ns, \
  410. HRTIMER_MODE_REL); \
  411. \
  412. __ret = ___wait_event(wq, condition, state, 0, 0, \
  413. if (!__t.task) { \
  414. __ret = -ETIME; \
  415. break; \
  416. } \
  417. schedule()); \
  418. \
  419. hrtimer_cancel(&__t.timer); \
  420. destroy_hrtimer_on_stack(&__t.timer); \
  421. __ret; \
  422. })
  423. /**
  424. * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
  425. * @wq: the waitqueue to wait on
  426. * @condition: a C expression for the event to wait for
  427. * @timeout: timeout, as a ktime_t
  428. *
  429. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  430. * @condition evaluates to true or a signal is received.
  431. * The @condition is checked each time the waitqueue @wq is woken up.
  432. *
  433. * wake_up() has to be called after changing any variable that could
  434. * change the result of the wait condition.
  435. *
  436. * The function returns 0 if @condition became true, or -ETIME if the timeout
  437. * elapsed.
  438. */
  439. #define wait_event_hrtimeout(wq, condition, timeout) \
  440. ({ \
  441. int __ret = 0; \
  442. might_sleep(); \
  443. if (!(condition)) \
  444. __ret = __wait_event_hrtimeout(wq, condition, timeout, \
  445. TASK_UNINTERRUPTIBLE); \
  446. __ret; \
  447. })
  448. /**
  449. * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
  450. * @wq: the waitqueue to wait on
  451. * @condition: a C expression for the event to wait for
  452. * @timeout: timeout, as a ktime_t
  453. *
  454. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  455. * @condition evaluates to true or a signal is received.
  456. * The @condition is checked each time the waitqueue @wq is woken up.
  457. *
  458. * wake_up() has to be called after changing any variable that could
  459. * change the result of the wait condition.
  460. *
  461. * The function returns 0 if @condition became true, -ERESTARTSYS if it was
  462. * interrupted by a signal, or -ETIME if the timeout elapsed.
  463. */
  464. #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
  465. ({ \
  466. long __ret = 0; \
  467. might_sleep(); \
  468. if (!(condition)) \
  469. __ret = __wait_event_hrtimeout(wq, condition, timeout, \
  470. TASK_INTERRUPTIBLE); \
  471. __ret; \
  472. })
  473. #define __wait_event_interruptible_exclusive(wq, condition) \
  474. ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
  475. schedule())
  476. #define wait_event_interruptible_exclusive(wq, condition) \
  477. ({ \
  478. int __ret = 0; \
  479. might_sleep(); \
  480. if (!(condition)) \
  481. __ret = __wait_event_interruptible_exclusive(wq, condition);\
  482. __ret; \
  483. })
  484. #define __wait_event_freezable_exclusive(wq, condition) \
  485. ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
  486. schedule(); try_to_freeze())
  487. #define wait_event_freezable_exclusive(wq, condition) \
  488. ({ \
  489. int __ret = 0; \
  490. might_sleep(); \
  491. if (!(condition)) \
  492. __ret = __wait_event_freezable_exclusive(wq, condition);\
  493. __ret; \
  494. })
  495. #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
  496. ({ \
  497. int __ret = 0; \
  498. DEFINE_WAIT(__wait); \
  499. if (exclusive) \
  500. __wait.flags |= WQ_FLAG_EXCLUSIVE; \
  501. do { \
  502. if (likely(list_empty(&__wait.task_list))) \
  503. __add_wait_queue_tail(&(wq), &__wait); \
  504. set_current_state(TASK_INTERRUPTIBLE); \
  505. if (signal_pending(current)) { \
  506. __ret = -ERESTARTSYS; \
  507. break; \
  508. } \
  509. if (irq) \
  510. spin_unlock_irq(&(wq).lock); \
  511. else \
  512. spin_unlock(&(wq).lock); \
  513. schedule(); \
  514. if (irq) \
  515. spin_lock_irq(&(wq).lock); \
  516. else \
  517. spin_lock(&(wq).lock); \
  518. } while (!(condition)); \
  519. __remove_wait_queue(&(wq), &__wait); \
  520. __set_current_state(TASK_RUNNING); \
  521. __ret; \
  522. })
  523. /**
  524. * wait_event_interruptible_locked - sleep until a condition gets true
  525. * @wq: the waitqueue to wait on
  526. * @condition: a C expression for the event to wait for
  527. *
  528. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  529. * @condition evaluates to true or a signal is received.
  530. * The @condition is checked each time the waitqueue @wq is woken up.
  531. *
  532. * It must be called with wq.lock being held. This spinlock is
  533. * unlocked while sleeping but @condition testing is done while lock
  534. * is held and when this macro exits the lock is held.
  535. *
  536. * The lock is locked/unlocked using spin_lock()/spin_unlock()
  537. * functions which must match the way they are locked/unlocked outside
  538. * of this macro.
  539. *
  540. * wake_up_locked() has to be called after changing any variable that could
  541. * change the result of the wait condition.
  542. *
  543. * The function will return -ERESTARTSYS if it was interrupted by a
  544. * signal and 0 if @condition evaluated to true.
  545. */
  546. #define wait_event_interruptible_locked(wq, condition) \
  547. ((condition) \
  548. ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
  549. /**
  550. * wait_event_interruptible_locked_irq - sleep until a condition gets true
  551. * @wq: the waitqueue to wait on
  552. * @condition: a C expression for the event to wait for
  553. *
  554. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  555. * @condition evaluates to true or a signal is received.
  556. * The @condition is checked each time the waitqueue @wq is woken up.
  557. *
  558. * It must be called with wq.lock being held. This spinlock is
  559. * unlocked while sleeping but @condition testing is done while lock
  560. * is held and when this macro exits the lock is held.
  561. *
  562. * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
  563. * functions which must match the way they are locked/unlocked outside
  564. * of this macro.
  565. *
  566. * wake_up_locked() has to be called after changing any variable that could
  567. * change the result of the wait condition.
  568. *
  569. * The function will return -ERESTARTSYS if it was interrupted by a
  570. * signal and 0 if @condition evaluated to true.
  571. */
  572. #define wait_event_interruptible_locked_irq(wq, condition) \
  573. ((condition) \
  574. ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
  575. /**
  576. * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
  577. * @wq: the waitqueue to wait on
  578. * @condition: a C expression for the event to wait for
  579. *
  580. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  581. * @condition evaluates to true or a signal is received.
  582. * The @condition is checked each time the waitqueue @wq is woken up.
  583. *
  584. * It must be called with wq.lock being held. This spinlock is
  585. * unlocked while sleeping but @condition testing is done while lock
  586. * is held and when this macro exits the lock is held.
  587. *
  588. * The lock is locked/unlocked using spin_lock()/spin_unlock()
  589. * functions which must match the way they are locked/unlocked outside
  590. * of this macro.
  591. *
  592. * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
  593. * set thus when other process waits process on the list if this
  594. * process is awaken further processes are not considered.
  595. *
  596. * wake_up_locked() has to be called after changing any variable that could
  597. * change the result of the wait condition.
  598. *
  599. * The function will return -ERESTARTSYS if it was interrupted by a
  600. * signal and 0 if @condition evaluated to true.
  601. */
  602. #define wait_event_interruptible_exclusive_locked(wq, condition) \
  603. ((condition) \
  604. ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
  605. /**
  606. * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
  607. * @wq: the waitqueue to wait on
  608. * @condition: a C expression for the event to wait for
  609. *
  610. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  611. * @condition evaluates to true or a signal is received.
  612. * The @condition is checked each time the waitqueue @wq is woken up.
  613. *
  614. * It must be called with wq.lock being held. This spinlock is
  615. * unlocked while sleeping but @condition testing is done while lock
  616. * is held and when this macro exits the lock is held.
  617. *
  618. * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
  619. * functions which must match the way they are locked/unlocked outside
  620. * of this macro.
  621. *
  622. * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
  623. * set thus when other process waits process on the list if this
  624. * process is awaken further processes are not considered.
  625. *
  626. * wake_up_locked() has to be called after changing any variable that could
  627. * change the result of the wait condition.
  628. *
  629. * The function will return -ERESTARTSYS if it was interrupted by a
  630. * signal and 0 if @condition evaluated to true.
  631. */
  632. #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
  633. ((condition) \
  634. ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
  635. #define __wait_event_killable(wq, condition) \
  636. ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
  637. /**
  638. * wait_event_killable - sleep until a condition gets true
  639. * @wq: the waitqueue to wait on
  640. * @condition: a C expression for the event to wait for
  641. *
  642. * The process is put to sleep (TASK_KILLABLE) until the
  643. * @condition evaluates to true or a signal is received.
  644. * The @condition is checked each time the waitqueue @wq is woken up.
  645. *
  646. * wake_up() has to be called after changing any variable that could
  647. * change the result of the wait condition.
  648. *
  649. * The function will return -ERESTARTSYS if it was interrupted by a
  650. * signal and 0 if @condition evaluated to true.
  651. */
  652. #define wait_event_killable(wq, condition) \
  653. ({ \
  654. int __ret = 0; \
  655. might_sleep(); \
  656. if (!(condition)) \
  657. __ret = __wait_event_killable(wq, condition); \
  658. __ret; \
  659. })
  660. #define __wait_event_lock_irq(wq, condition, lock, cmd) \
  661. (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  662. spin_unlock_irq(&lock); \
  663. cmd; \
  664. schedule(); \
  665. spin_lock_irq(&lock))
  666. /**
  667. * wait_event_lock_irq_cmd - sleep until a condition gets true. The
  668. * condition is checked under the lock. This
  669. * is expected to be called with the lock
  670. * taken.
  671. * @wq: the waitqueue to wait on
  672. * @condition: a C expression for the event to wait for
  673. * @lock: a locked spinlock_t, which will be released before cmd
  674. * and schedule() and reacquired afterwards.
  675. * @cmd: a command which is invoked outside the critical section before
  676. * sleep
  677. *
  678. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  679. * @condition evaluates to true. The @condition is checked each time
  680. * the waitqueue @wq is woken up.
  681. *
  682. * wake_up() has to be called after changing any variable that could
  683. * change the result of the wait condition.
  684. *
  685. * This is supposed to be called while holding the lock. The lock is
  686. * dropped before invoking the cmd and going to sleep and is reacquired
  687. * afterwards.
  688. */
  689. #define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
  690. do { \
  691. if (condition) \
  692. break; \
  693. __wait_event_lock_irq(wq, condition, lock, cmd); \
  694. } while (0)
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	/* empty @cmd argument: nothing extra to run before sleeping */	\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)
/*
 * Helper for wait_event_interruptible_lock_irq{,_cmd}(): sleeps in
 * TASK_INTERRUPTIBLE, dropping @lock (and running @cmd) around each
 * schedule() call.  Evaluates to ___wait_event()'s result -- 0, or
 * -ERESTARTSYS if a signal arrived (see the kernel-doc of the callers).
 */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})
  790. #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
  791. lock, timeout) \
  792. ___wait_event(wq, ___wait_cond_timeout(condition), \
  793. TASK_INTERRUPTIBLE, 0, timeout, \
  794. spin_unlock_irq(&lock); \
  795. __ret = schedule_timeout(__ret); \
  796. spin_lock_irq(&lock));
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true, a signal is received, or the timeout
 * elapses. The @condition is checked each time the waitqueue @wq is
 * woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */

/* Bracket an open-coded wait loop: add/remove @wait and set the task state. */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
/*
 * Wake callbacks usable as wait_queue_t.func (see DEFINE_WAIT() and
 * DEFINE_WAIT_BIT() below, which install autoremove_wake_function and
 * wake_bit_function respectively).
 */
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/*
 * Declare an on-stack wait_queue_t for the current task with @function
 * as its wake callback.  DEFINE_WAIT() is the common case, using
 * autoremove_wake_function so the entry is removed from the queue at
 * wakeup time.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
/*
 * Declare an on-stack wait_bit_queue for the current task, keyed to
 * @bit of @word and woken through wake_bit_function().
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
/*
 * Initialize a caller-allocated wait_queue_t at run time (the run-time
 * counterpart of DEFINE_WAIT()): current task, autoremove wake callback,
 * empty list linkage, no flags.
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
/*
 * Stock @action callbacks for wait_on_bit_action()/wait_on_bit_lock_action()
 * and the default actions used by the wrappers below.  The _io variants
 * presumably sleep via io_schedule() and the _timeout variants honor a
 * timeout -- NOTE(review): confirm against their definitions, which are
 * not in this header.
 */
extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);
extern int bit_wait_timeout(struct wait_bit_key *);
extern int bit_wait_io_timeout(struct wait_bit_key *);
  870. /**
  871. * wait_on_bit - wait for a bit to be cleared
  872. * @word: the word being waited on, a kernel virtual address
  873. * @bit: the bit of the word being waited on
  874. * @mode: the task state to sleep in
  875. *
  876. * There is a standard hashed waitqueue table for generic use. This
  877. * is the part of the hashtable's accessor API that waits on a bit.
  878. * For instance, if one were to have waiters on a bitflag, one would
  879. * call wait_on_bit() in threads waiting for the bit to clear.
  880. * One uses wait_on_bit() where one is waiting for the bit to clear,
  881. * but has no intention of setting it.
  882. * Returned value will be zero if the bit was cleared, or non-zero
  883. * if the process received a signal and the mode permitted wakeup
  884. * on that signal.
  885. */
  886. static inline int
  887. wait_on_bit(void *word, int bit, unsigned mode)
  888. {
  889. might_sleep();
  890. if (!test_bit(bit, word))
  891. return 0;
  892. return out_of_line_wait_on_bit(word, bit,
  893. bit_wait,
  894. mode);
  895. }
  896. /**
  897. * wait_on_bit_io - wait for a bit to be cleared
  898. * @word: the word being waited on, a kernel virtual address
  899. * @bit: the bit of the word being waited on
  900. * @mode: the task state to sleep in
  901. *
  902. * Use the standard hashed waitqueue table to wait for a bit
  903. * to be cleared. This is similar to wait_on_bit(), but calls
  904. * io_schedule() instead of schedule() for the actual waiting.
  905. *
  906. * Returned value will be zero if the bit was cleared, or non-zero
  907. * if the process received a signal and the mode permitted wakeup
  908. * on that signal.
  909. */
  910. static inline int
  911. wait_on_bit_io(void *word, int bit, unsigned mode)
  912. {
  913. might_sleep();
  914. if (!test_bit(bit, word))
  915. return 0;
  916. return out_of_line_wait_on_bit(word, bit,
  917. bit_wait_io,
  918. mode);
  919. }
  920. /**
  921. * wait_on_bit_action - wait for a bit to be cleared
  922. * @word: the word being waited on, a kernel virtual address
  923. * @bit: the bit of the word being waited on
  924. * @action: the function used to sleep, which may take special actions
  925. * @mode: the task state to sleep in
  926. *
  927. * Use the standard hashed waitqueue table to wait for a bit
  928. * to be cleared, and allow the waiting action to be specified.
  929. * This is like wait_on_bit() but allows fine control of how the waiting
  930. * is done.
  931. *
  932. * Returned value will be zero if the bit was cleared, or non-zero
  933. * if the process received a signal and the mode permitted wakeup
  934. * on that signal.
  935. */
  936. static inline int
  937. wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
  938. {
  939. might_sleep();
  940. if (!test_bit(bit, word))
  941. return 0;
  942. return out_of_line_wait_on_bit(word, bit, action, mode);
  943. }
  944. /**
  945. * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
  946. * @word: the word being waited on, a kernel virtual address
  947. * @bit: the bit of the word being waited on
  948. * @mode: the task state to sleep in
  949. *
  950. * There is a standard hashed waitqueue table for generic use. This
  951. * is the part of the hashtable's accessor API that waits on a bit
  952. * when one intends to set it, for instance, trying to lock bitflags.
  953. * For instance, if one were to have waiters trying to set bitflag
  954. * and waiting for it to clear before setting it, one would call
  955. * wait_on_bit() in threads waiting to be able to set the bit.
  956. * One uses wait_on_bit_lock() where one is waiting for the bit to
  957. * clear with the intention of setting it, and when done, clearing it.
  958. *
  959. * Returns zero if the bit was (eventually) found to be clear and was
  960. * set. Returns non-zero if a signal was delivered to the process and
  961. * the @mode allows that signal to wake the process.
  962. */
  963. static inline int
  964. wait_on_bit_lock(void *word, int bit, unsigned mode)
  965. {
  966. might_sleep();
  967. if (!test_and_set_bit(bit, word))
  968. return 0;
  969. return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
  970. }
  971. /**
  972. * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
  973. * @word: the word being waited on, a kernel virtual address
  974. * @bit: the bit of the word being waited on
  975. * @mode: the task state to sleep in
  976. *
  977. * Use the standard hashed waitqueue table to wait for a bit
  978. * to be cleared and then to atomically set it. This is similar
  979. * to wait_on_bit(), but calls io_schedule() instead of schedule()
  980. * for the actual waiting.
  981. *
  982. * Returns zero if the bit was (eventually) found to be clear and was
  983. * set. Returns non-zero if a signal was delivered to the process and
  984. * the @mode allows that signal to wake the process.
  985. */
  986. static inline int
  987. wait_on_bit_lock_io(void *word, int bit, unsigned mode)
  988. {
  989. might_sleep();
  990. if (!test_and_set_bit(bit, word))
  991. return 0;
  992. return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
  993. }
  994. /**
  995. * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
  996. * @word: the word being waited on, a kernel virtual address
  997. * @bit: the bit of the word being waited on
  998. * @action: the function used to sleep, which may take special actions
  999. * @mode: the task state to sleep in
  1000. *
  1001. * Use the standard hashed waitqueue table to wait for a bit
  1002. * to be cleared and then to set it, and allow the waiting action
  1003. * to be specified.
  1004. * This is like wait_on_bit() but allows fine control of how the waiting
  1005. * is done.
  1006. *
  1007. * Returns zero if the bit was (eventually) found to be clear and was
  1008. * set. Returns non-zero if a signal was delivered to the process and
  1009. * the @mode allows that signal to wake the process.
  1010. */
  1011. static inline int
  1012. wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
  1013. {
  1014. might_sleep();
  1015. if (!test_and_set_bit(bit, word))
  1016. return 0;
  1017. return out_of_line_wait_on_bit_lock(word, bit, action, mode);
  1018. }
  1019. /**
  1020. * wait_on_atomic_t - Wait for an atomic_t to become 0
  1021. * @val: The atomic value being waited on, a kernel virtual address
  1022. * @action: the function used to sleep, which may take special actions
  1023. * @mode: the task state to sleep in
  1024. *
  1025. * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
  1026. * the purpose of getting a waitqueue, but we set the key to a bit number
  1027. * outside of the target 'word'.
  1028. */
  1029. static inline
  1030. int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
  1031. {
  1032. might_sleep();
  1033. if (atomic_read(val) == 0)
  1034. return 0;
  1035. return out_of_line_wait_on_atomic_t(val, action, mode);
  1036. }
  1037. #endif /* _LINUX_WAIT_H */