|
@@ -262,4 +262,74 @@ int wait_on_atomic_t(atomic_t *val, wait_atomic_t_action_f action, unsigned mode
|
|
|
return out_of_line_wait_on_atomic_t(val, action, mode);
|
|
|
}
|
|
|
|
|
|
/*
 * Waiting on arbitrary variables (companion to the wait-bit API above):
 *
 * init_wait_var_entry() - initialize @wbq_entry to wait on the address @var;
 *                         @flags may include WQ_FLAG_EXCLUSIVE.
 * wake_up_var()         - wake waiters that queued on address @var.
 * __var_waitqueue()     - return the wait_queue_head associated with
 *                         address @p (used by ___wait_var_event() below).
 */
extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags);
extern void wake_up_var(void *var);
extern wait_queue_head_t *__var_waitqueue(void *p);
|
|
|
+
|
|
|
/*
 * ___wait_var_event() - core wait loop backing the wait_var_event*() family.
 *
 * Statement expression; its value is __ret (long): @ret on normal
 * completion, or the non-zero value returned by prepare_to_wait_event()
 * when @state is interruptible and the wait was interrupted.
 *
 * @cmd runs once per loop iteration while @condition is still false
 * (typically schedule()); it may assign __ret, which is how
 * __wait_var_event_timeout() tracks the remaining timeout.
 * @exclusive non-zero queues the entry with WQ_FLAG_EXCLUSIVE.
 *
 * NOTE(review): the interrupted path jumps to __out and skips
 * finish_wait(); presumably prepare_to_wait_event() has already dequeued
 * the entry on the signal path -- confirm against its implementation.
 */
#define ___wait_var_event(var, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	struct wait_queue_head *__wq_head = __var_waitqueue(var);	\
	struct wait_bit_queue_entry __wbq_entry;			\
	long __ret = ret; /* explicit shadow */				\
									\
	init_wait_var_entry(&__wbq_entry, var,				\
			    exclusive ? WQ_FLAG_EXCLUSIVE : 0);		\
	for (;;) {							\
		long __int = prepare_to_wait_event(__wq_head,		\
						   &__wbq_entry.wq_entry, \
						   state);		\
		/* re-test @condition after every (potential) wakeup */	\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(__wq_head, &__wbq_entry.wq_entry);			\
__out:	__ret;								\
})
|
|
|
+
|
|
|
/*
 * Slow path for wait_var_event(): uninterruptible sleep on @var until
 * @condition holds.  Expands to 0 (the @ret argument).
 */
#define __wait_var_event(var, condition)				\
	___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			  schedule())
|
|
|
+
|
|
|
/*
 * wait_var_event - sleep (TASK_UNINTERRUPTIBLE) until @condition is true.
 * @var: address wake_up_var() will be called on by the waker.
 * @condition: expression re-evaluated after every wakeup.
 *
 * Fast path: if @condition already holds, return without queueing.
 * May sleep; callers must be in a sleepable context (might_sleep()).
 */
#define wait_var_event(var, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_var_event(var, condition);				\
} while (0)
|
|
|
+
|
|
|
/*
 * Slow path for wait_var_event_killable(): as __wait_var_event() but in
 * TASK_KILLABLE, so a fatal signal terminates the wait early.
 */
#define __wait_var_event_killable(var, condition)			\
	___wait_var_event(var, condition, TASK_KILLABLE, 0, 0,		\
			  schedule())
|
|
|
+
|
|
|
/*
 * wait_var_event_killable - like wait_var_event() but killable.
 * @var: address wake_up_var() will be called on by the waker.
 * @condition: expression re-evaluated after every wakeup.
 *
 * Returns 0 once @condition is true, or a negative value propagated from
 * prepare_to_wait_event() if a fatal signal interrupted the wait.
 */
#define wait_var_event_killable(var, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_var_event_killable(var, condition);	\
	__ret;								\
})
|
|
|
+
|
|
|
/*
 * Slow path for wait_var_event_timeout(): uninterruptible, timed wait.
 * @timeout seeds ___wait_var_event()'s __ret; each loop iteration replaces
 * it with the remaining jiffies from schedule_timeout().
 * ___wait_cond_timeout() folds timeout expiry into the wait condition.
 */
#define __wait_var_event_timeout(var, condition, timeout)		\
	___wait_var_event(var, ___wait_cond_timeout(condition),		\
			  TASK_UNINTERRUPTIBLE, 0, timeout,		\
			  __ret = schedule_timeout(__ret))
|
|
|
+
|
|
|
/*
 * wait_var_event_timeout - wait_var_event() with a timeout.
 * @var: address wake_up_var() will be called on by the waker.
 * @condition: expression re-evaluated after every wakeup.
 * @timeout: timeout in jiffies.
 *
 * Returns the remaining timeout (per ___wait_cond_timeout()/
 * schedule_timeout() semantics); fast path returns @timeout unchanged
 * when @condition already holds.
 */
#define wait_var_event_timeout(var, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_var_event_timeout(var, condition, timeout); \
	__ret;								\
})
|
|
|
+
|
|
|
#endif /* _LINUX_WAIT_BIT_H */
|