#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02

struct __wait_queue {
	unsigned int		flags;
	void			*private;
	wait_queue_func_t	func;
	struct list_head	task_list;
};

struct wait_bit_key {
	void			*flags;
	int			bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
	unsigned long		timeout;
};

struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name)					\
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
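/*
 * Example (illustrative sketch, not part of this header): a statically
 * declared wait queue head is ready to use as-is, while one embedded in a
 * dynamically allocated structure must be initialised first. The names
 * my_wq, my_dev and my_dev_setup below are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);		// static: ready to use
 *
 *	struct my_dev {
 *		wait_queue_head_t wq;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *d)
 *	{
 *		init_waitqueue_head(&d->wq);		// dynamic: must init
 *	}
 */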
#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags	= 0;
	q->private	= p;
	q->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
	q->flags	= 0;
	q->private	= NULL;
	q->func		= func;
}

/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @q: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head_t::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq))         if (@cond)
 *        wake_up(wq);                      break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}
/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq: wait queue head
 *
 * Returns true if wq has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(wait_queue_head_t *wq)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq);
}
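/*
 * Example (illustrative sketch, not part of this header): the lockless
 * waker pattern documented above, written out. wq_has_sleeper() supplies
 * the smp_mb() that orders the @cond store against the waiter-list load.
 * The names my_wq and my_cond are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool my_cond;
 *
 *	static void my_waker(void)
 *	{
 *		my_cond = true;
 *		if (wq_has_sleeper(&my_wq))	// smp_mb() + waitqueue_active()
 *			wake_up(&my_wq);
 *	}
 */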
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}
typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
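/*
 * Example (illustrative sketch, not part of this header): the @nr argument
 * to __wake_up() bounds how many WQ_FLAG_EXCLUSIVE waiters are woken;
 * non-exclusive waiters are always woken. my_wq is hypothetical.
 *
 *	wake_up(&my_wq);	// at most one exclusive waiter
 *	wake_up_nr(&my_wq, 4);	// at most four exclusive waiters
 *	wake_up_all(&my_wq);	// all waiters, exclusive or not
 */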
/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */
#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;	/* explicit shadow */			\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})

#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())
/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
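/*
 * Example (illustrative sketch, not part of this header): the canonical
 * producer/consumer pairing for wait_event(). The consumer sleeps
 * uninterruptibly until the producer sets the flag and calls wake_up().
 * my_wq and my_done are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_done;
 *
 *	// consumer
 *	wait_event(my_wq, my_done);
 *
 *	// producer
 *	my_done = 1;
 *	wake_up(&my_wq);
 */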
#define __io_wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__io_wait_event(wq, condition);					\
} while (0)

#define __wait_event_freezable(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable(wq, condition);		\
	__ret;								\
})
#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})
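/*
 * Example (illustrative sketch, not part of this header): handling the
 * tri-state return of wait_event_timeout(). my_wq and my_done are
 * hypothetical.
 *
 *	long ret = wait_event_timeout(my_wq, my_done, HZ);	// up to 1s
 *
 *	if (!ret)
 *		return -ETIMEDOUT;	// timed out, condition still false
 *	// otherwise the condition was true; ret (>= 1) is the time left
 */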
#define __wait_event_freezable_timeout(wq, condition, timeout)		\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_freezable_timeout(wq, condition, timeout); \
	__ret;								\
})

#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);		\
} while (0)
#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)
#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})
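/*
 * Example (illustrative sketch, not part of this header): an interruptible
 * wait from syscall context, propagating -ERESTARTSYS so the signal
 * machinery can restart the call. my_wq and my_done are hypothetical.
 *
 *	int err = wait_event_interruptible(my_wq, my_done);
 *
 *	if (err)
 *		return err;	// -ERESTARTSYS: a signal interrupted the wait
 *	// my_done was observed true
 */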
#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})
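/*
 * Example (illustrative sketch, not part of this header): the four-way
 * return of wait_event_interruptible_timeout(). my_wq and my_done are
 * hypothetical.
 *
 *	long ret = wait_event_interruptible_timeout(my_wq, my_done, HZ);
 *
 *	if (ret < 0)
 *		return ret;		// -ERESTARTSYS: signal
 *	if (!ret)
 *		return -ETIMEDOUT;	// timed out, condition false
 *	// ret >= 1: condition was true
 */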
#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})
/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses; signals are
 * ignored in this state.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})
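/*
 * Example (illustrative sketch, not part of this header): a
 * high-resolution wait bounded at 500us, using ns_to_ktime() to build the
 * ktime_t. my_wq and my_done are hypothetical.
 *
 *	int err = wait_event_hrtimeout(my_wq, my_done, ns_to_ktime(500000));
 *
 *	if (err == -ETIME)
 *		return err;	// timeout elapsed before my_done went true
 */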
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})

#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition); \
	__ret;								\
})

#define __wait_event_killable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,		\
		      schedule())

#define wait_event_killable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable_exclusive(wq, condition);	\
	__ret;								\
})

#define __wait_event_freezable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule(); try_to_freeze())

#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable_exclusive(wq, condition); \
	__ret;								\
})
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are also waiting on the list and this
 * process is woken, the remaining processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are also waiting on the list and this
 * process is woken, the remaining processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
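/*
 * Example (illustrative sketch, not part of this header): the _locked
 * variants are entered and exited with wq.lock held, and the waker uses
 * wake_up_locked() under the same lock. my_wq and my_done are hypothetical.
 *
 *	// waiter
 *	spin_lock(&my_wq.lock);
 *	int err = wait_event_interruptible_locked(my_wq, my_done);
 *	spin_unlock(&my_wq.lock);
 *
 *	// waker
 *	spin_lock(&my_wq.lock);
 *	my_done = 1;
 *	wake_up_locked(&my_wq);
 *	spin_unlock(&my_wq.lock);
 */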
#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})
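/*
 * Example (illustrative sketch, not part of this header): TASK_KILLABLE
 * waits sleep like TASK_UNINTERRUPTIBLE but can still be ended by a fatal
 * signal, a common choice for long waits in filesystem and driver paths.
 * my_wq and my_done are hypothetical.
 *
 *	if (wait_event_killable(my_wq, my_done))
 *		return -EINTR;	// fatal signal; the task is exiting anyway
 */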
#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)
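/*
 * Example (illustrative sketch, not part of this header): waiting for a
 * counter protected by a spinlock to drain. The lock is held on entry;
 * the macro drops it around schedule(). my_lock, my_wq and my_count are
 * hypothetical.
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, my_count == 0, my_lock);
 *	// my_lock is held again here, and my_count was 0 under it
 *	spin_unlock_irq(&my_lock);
 */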
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})
#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies if the
 * condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
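/*
 * Example (illustrative sketch, not part of this header): the open-coded
 * wait loop that the wait_event*() family wraps, useful when extra work is
 * needed between the condition check and schedule(). my_wq and my_done
 * are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (my_done)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */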
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)

extern int bit_wait(struct wait_bit_key *, int);
extern int bit_wait_io(struct wait_bit_key *, int);
extern int bit_wait_timeout(struct wait_bit_key *, int);
extern int bit_wait_io_timeout(struct wait_bit_key *, int);
/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * One uses wait_on_bit() where one is waiting for the bit to clear
 * but has no intention of setting it.
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait,
				       mode);
}
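/*
 * Example (illustrative sketch, not part of this header): waiting for a
 * hypothetical MY_BUSY bit in a flags word to clear. The thread clearing
 * the bit must pair clear_bit() with wake_up_bit() on the same word/bit,
 * with a barrier in between so waiters observe the cleared bit.
 *
 *	// waiter
 *	if (wait_on_bit(&my_flags, MY_BUSY, TASK_INTERRUPTIBLE))
 *		return -EINTR;	// signalled before the bit cleared
 *
 *	// clearer
 *	clear_bit(MY_BUSY, &my_flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&my_flags, MY_BUSY);
 */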
/**
 * wait_on_bit_io - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), but calls
 * io_schedule() instead of schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait_io,
				       mode);
}

/**
 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 * @timeout: timeout, in jiffies
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), except also takes a
 * timeout parameter.
 *
 * Returned value will be zero if the bit was cleared before the
 * @timeout elapsed, or non-zero if the @timeout elapsed or process
 * received a signal and the mode permitted wakeup on that signal.
 */
static inline int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
		    unsigned long timeout)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_timeout(word, bit,
					       bit_wait_timeout,
					       mode, timeout);
}

/**
 * wait_on_bit_action - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared, and allow the waiting action to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
		   unsigned mode)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}
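/*
 * Example (illustrative sketch, not part of this header): using a bit as
 * a sleeping lock. wait_on_bit_lock() returns with the bit set (lock
 * owned); the unlock path clears it and wakes the next waiter. MY_LOCK
 * and my_flags are hypothetical.
 *
 *	// lock
 *	if (wait_on_bit_lock(&my_flags, MY_LOCK, TASK_KILLABLE))
 *		return -EINTR;		// fatal signal while waiting
 *
 *	// ... critical section, MY_LOCK bit is set ...
 *
 *	// unlock
 *	clear_bit_unlock(MY_LOCK, &my_flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&my_flags, MY_LOCK);
 */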
/**
 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to atomically set it. This is similar
 * to wait_on_bit(), but calls io_schedule() instead of schedule()
 * for the actual waiting.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}

/**
 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to set it, and allow the waiting action
 * to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
			unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
	might_sleep();
	if (atomic_read(val) == 0)
		return 0;
	return out_of_line_wait_on_atomic_t(val, action, mode);
}

#endif /* _LINUX_WAIT_H */