  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _LINUX_WAIT_H
  3. #define _LINUX_WAIT_H
  4. /*
  5. * Linux wait queue related types and methods
  6. */
  7. #include <linux/list.h>
  8. #include <linux/stddef.h>
  9. #include <linux/spinlock.h>
  10. #include <asm/current.h>
  11. #include <uapi/linux/wait.h>
typedef struct wait_queue_entry wait_queue_entry_t;

/*
 * Per-entry wake callback: invoked for each queued entry on wakeup.
 * Return value semantics are defined by the scheduler core (see
 * default_wake_function).
 */
typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02
#define WQ_FLAG_BOOKMARK	0x04
#define WQ_FLAG_CUSTOM		0x08
#define WQ_FLAG_DONE		0x10
#define WQ_FLAG_PRIORITY	0x20

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;		/* WQ_FLAG_* bits above */
	void			*private;	/* typically the waiting task_struct (see init_waitqueue_entry) */
	wait_queue_func_t	func;		/* wake callback */
	struct list_head	entry;		/* link on wait_queue_head::head */
};

struct wait_queue_head {
	spinlock_t		lock;		/* protects ->head */
	struct list_head	head;		/* list of queued wait_queue_entry::entry */
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */
/* Static initializer for a wait_queue_entry bound to task @tsk. */
#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }	/* not linked until queued */

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a wait_queue_head: unlocked lock, empty list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= LIST_HEAD_INIT(name.head) }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Runtime initializer: the function-local static lock_class_key gives
 * every init_waitqueue_head() call site its own lockdep class.
 */
#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
/* On-stack heads are initialized at runtime so lockdep gets a valid key. */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
  66. static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
  67. {
  68. wq_entry->flags = 0;
  69. wq_entry->private = p;
  70. wq_entry->func = default_wake_function;
  71. }
  72. static inline void
  73. init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
  74. {
  75. wq_entry->flags = 0;
  76. wq_entry->private = NULL;
  77. wq_entry->func = func;
  78. }
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like::
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))    if (@cond)
 *        wake_up(wq_head);                 break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	/* Plain, unordered load -- see the ordering requirements above. */
	return !list_empty(&wq_head->head);
}
/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has only one sleeper on the list.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	/* Lockless, like waitqueue_active(); same ordering caveats apply. */
	return list_is_singular(&wq_head->head);
}
/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
/* Locked variants: these take wq_head->lock themselves. */
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

/*
 * Queue @wq_entry behind any WQ_FLAG_PRIORITY entries but ahead of all
 * normal entries, so priority waiters stay at the front of the list.
 * NOTE(review): per the __ naming convention, the caller presumably holds
 * wq_head->lock -- confirm against the locked variants above.
 */
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	struct list_head *head = &wq_head->head;
	struct wait_queue_entry *wq;

	/* Advance the insertion point past every priority entry. */
	list_for_each_entry(wq, &wq_head->head, entry) {
		if (!(wq->flags & WQ_FLAG_PRIORITY))
			break;
		head = &wq->entry;
	}
	list_add(&wq_entry->entry, head);
}
/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	/* Mark exclusive before enqueueing so wakers never see it unflagged. */
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}

/* Append @wq_entry at the tail of the list (behind all existing waiters). */
static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}

/* Tail insertion for wake-one (exclusive) waiters. */
static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}

/* Unlink @wq_entry; caller is responsible for any required locking. */
static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->entry);
}
/*
 * Wakeup primitives implemented in kernel/sched/wait.c.
 * @mode selects which task states are eligible; @nr limits how many
 * exclusive waiters are woken (0 is used by the *_all variants below).
 */
int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
void __wake_up_pollfree(struct wait_queue_head *wq_head);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)
#define wake_up_sync(x)			__wake_up_sync((x), TASK_NORMAL)

/*
 * Wakeup macros to be used to report events to the targets.
 * The poll mask is smuggled through the ->func @key argument.
 */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m)							\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m)						\
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)					\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)					\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m)				\
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
/**
 * wake_up_pollfree - signal that a polled waitqueue is going away
 * @wq_head: the wait queue head
 *
 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
 * lifetime is tied to a task rather than to the 'struct file' being polled,
 * this function must be called before the waitqueue is freed so that
 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
 *
 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
 */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/*
	 * For performance reasons, we don't always take the queue lock here.
	 * Therefore, we might race with someone removing the last entry from
	 * the queue, and proceed while they still hold the queue lock.
	 * However, rcu_read_lock() is required to be held in such cases, so we
	 * can safely proceed with an RCU-delayed free.
	 */
	if (waitqueue_active(wq_head))	/* lockless fast path; see above */
		__wake_up_pollfree(wq_head);
}
/*
 * Wrap @condition for the *_timeout waits: also succeed when the timeout
 * (__ret, owned by the enclosing macro) has expired, forcing __ret to 1
 * so "condition became true at the last moment" is distinguishable.
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})

/* Conservatively true for non-constant @state: signals may abort the wait. */
#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
	 (state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		/* signal pending: bail out with the -ERESTARTSYS etc. code */	\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})
#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)	/* fast path: no queueing if already true */		\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)

#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)
#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE),	\
			0, 0, schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns 0 when @condition became true, or the interruption code
 * propagated from ___wait_event() when a signal aborted the wait.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})
#define __wait_event_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)				\
({										\
	long __ret = timeout;	/* read by ___wait_cond_timeout() below */	\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 0, timeout,		\
		      __ret = schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})
#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)
#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);		\
	__ret;									\
})

#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;	/* read by ___wait_cond_timeout() below */	\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})
/*
 * High-resolution-timer flavour: arms an on-stack hrtimer_sleeper unless
 * @timeout is KTIME_MAX (wait forever), then waits in @state.
 */
#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
				      HRTIMER_MODE_REL);			\
	if ((timeout) != KTIME_MAX) {						\
		hrtimer_set_expires_range_ns(&__t.timer, timeout,		\
					     current->timer_slack_ns);		\
		hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL);		\
	}									\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		/* NULL ->task here is taken to mean the timer fired */		\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses; being
 * uninterruptible, it is not aborted by signals.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})
#define __wait_event_interruptible_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      schedule())

/*
 * Like wait_event_interruptible() but queues as an exclusive (wake-one)
 * waiter. Returns 0 on success or -ERESTARTSYS on signal.
 */
#define wait_event_interruptible_exclusive(wq, condition)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
	__ret;									\
})

#define __wait_event_killable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
		      schedule())

/* Exclusive wait abortable only by fatal signals (TASK_KILLABLE). */
#define wait_event_killable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable_exclusive(wq, condition);		\
	__ret;									\
})

#define __wait_event_freezable_exclusive(wq, condition)				\
	___wait_event(wq, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 1, 0,	\
		      schedule())

/* Exclusive, interruptible and freezable wait. */
#define wait_event_freezable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable_exclusive(wq, condition);	\
	__ret;									\
})
/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
} while (0)

/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle_exclusive(wq_head, condition)				\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
} while (0)
#define __wait_event_idle_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 0, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)			\
({										\
	long __ret = timeout;	/* read by ___wait_cond_timeout() below */	\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 1, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout); \
	__ret;									\
})
  701. extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
  702. extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
  703. #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
  704. ({ \
  705. int __ret; \
  706. DEFINE_WAIT(__wait); \
  707. if (exclusive) \
  708. __wait.flags |= WQ_FLAG_EXCLUSIVE; \
  709. do { \
  710. __ret = fn(&(wq), &__wait); \
  711. if (__ret) \
  712. break; \
  713. } while (!(condition)); \
  714. __remove_wait_queue(&(wq), &__wait); \
  715. __set_current_state(TASK_RUNNING); \
  716. __ret; \
  717. })
  718. /**
  719. * wait_event_interruptible_locked - sleep until a condition gets true
  720. * @wq: the waitqueue to wait on
  721. * @condition: a C expression for the event to wait for
  722. *
  723. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  724. * @condition evaluates to true or a signal is received.
  725. * The @condition is checked each time the waitqueue @wq is woken up.
  726. *
  727. * It must be called with wq.lock being held. This spinlock is
  728. * unlocked while sleeping but @condition testing is done while lock
  729. * is held and when this macro exits the lock is held.
  730. *
  731. * The lock is locked/unlocked using spin_lock()/spin_unlock()
  732. * functions which must match the way they are locked/unlocked outside
  733. * of this macro.
  734. *
  735. * wake_up_locked() has to be called after changing any variable that could
  736. * change the result of the wait condition.
  737. *
  738. * The function will return -ERESTARTSYS if it was interrupted by a
  739. * signal and 0 if @condition evaluated to true.
  740. */
  741. #define wait_event_interruptible_locked(wq, condition) \
  742. ((condition) \
  743. ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
  744. /**
  745. * wait_event_interruptible_locked_irq - sleep until a condition gets true
  746. * @wq: the waitqueue to wait on
  747. * @condition: a C expression for the event to wait for
  748. *
  749. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  750. * @condition evaluates to true or a signal is received.
  751. * The @condition is checked each time the waitqueue @wq is woken up.
  752. *
  753. * It must be called with wq.lock being held. This spinlock is
  754. * unlocked while sleeping but @condition testing is done while lock
  755. * is held and when this macro exits the lock is held.
  756. *
  757. * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
  758. * functions which must match the way they are locked/unlocked outside
  759. * of this macro.
  760. *
  761. * wake_up_locked() has to be called after changing any variable that could
  762. * change the result of the wait condition.
  763. *
  764. * The function will return -ERESTARTSYS if it was interrupted by a
  765. * signal and 0 if @condition evaluated to true.
  766. */
  767. #define wait_event_interruptible_locked_irq(wq, condition) \
  768. ((condition) \
  769. ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
  770. /**
  771. * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
  772. * @wq: the waitqueue to wait on
  773. * @condition: a C expression for the event to wait for
  774. *
  775. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  776. * @condition evaluates to true or a signal is received.
  777. * The @condition is checked each time the waitqueue @wq is woken up.
  778. *
  779. * It must be called with wq.lock being held. This spinlock is
  780. * unlocked while sleeping but @condition testing is done while lock
  781. * is held and when this macro exits the lock is held.
  782. *
  783. * The lock is locked/unlocked using spin_lock()/spin_unlock()
  784. * functions which must match the way they are locked/unlocked outside
  785. * of this macro.
  786. *
  787. * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
  788. * set thus when other process waits process on the list if this
  789. * process is awaken further processes are not considered.
  790. *
  791. * wake_up_locked() has to be called after changing any variable that could
  792. * change the result of the wait condition.
  793. *
  794. * The function will return -ERESTARTSYS if it was interrupted by a
  795. * signal and 0 if @condition evaluated to true.
  796. */
  797. #define wait_event_interruptible_exclusive_locked(wq, condition) \
  798. ((condition) \
  799. ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
  800. /**
  801. * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
  802. * @wq: the waitqueue to wait on
  803. * @condition: a C expression for the event to wait for
  804. *
  805. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  806. * @condition evaluates to true or a signal is received.
  807. * The @condition is checked each time the waitqueue @wq is woken up.
  808. *
  809. * It must be called with wq.lock being held. This spinlock is
  810. * unlocked while sleeping but @condition testing is done while lock
  811. * is held and when this macro exits the lock is held.
  812. *
  813. * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
  814. * functions which must match the way they are locked/unlocked outside
  815. * of this macro.
  816. *
  817. * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
  818. * set thus when other process waits process on the list if this
  819. * process is awaken further processes are not considered.
  820. *
  821. * wake_up_locked() has to be called after changing any variable that could
  822. * change the result of the wait condition.
  823. *
  824. * The function will return -ERESTARTSYS if it was interrupted by a
  825. * signal and 0 if @condition evaluated to true.
  826. */
  827. #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
  828. ((condition) \
  829. ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
  830. #define __wait_event_killable(wq, condition) \
  831. ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
  832. /**
  833. * wait_event_killable - sleep until a condition gets true
  834. * @wq_head: the waitqueue to wait on
  835. * @condition: a C expression for the event to wait for
  836. *
  837. * The process is put to sleep (TASK_KILLABLE) until the
  838. * @condition evaluates to true or a signal is received.
  839. * The @condition is checked each time the waitqueue @wq_head is woken up.
  840. *
  841. * wake_up() has to be called after changing any variable that could
  842. * change the result of the wait condition.
  843. *
  844. * The function will return -ERESTARTSYS if it was interrupted by a
  845. * signal and 0 if @condition evaluated to true.
  846. */
  847. #define wait_event_killable(wq_head, condition) \
  848. ({ \
  849. int __ret = 0; \
  850. might_sleep(); \
  851. if (!(condition)) \
  852. __ret = __wait_event_killable(wq_head, condition); \
  853. __ret; \
  854. })
  855. #define __wait_event_state(wq, condition, state) \
  856. ___wait_event(wq, condition, state, 0, 0, schedule())
  857. /**
  858. * wait_event_state - sleep until a condition gets true
  859. * @wq_head: the waitqueue to wait on
  860. * @condition: a C expression for the event to wait for
  861. * @state: state to sleep in
  862. *
  863. * The process is put to sleep (@state) until the @condition evaluates to true
  864. * or a signal is received (when allowed by @state). The @condition is checked
  865. * each time the waitqueue @wq_head is woken up.
  866. *
  867. * wake_up() has to be called after changing any variable that could
  868. * change the result of the wait condition.
  869. *
  870. * The function will return -ERESTARTSYS if it was interrupted by a signal
  871. * (when allowed by @state) and 0 if @condition evaluated to true.
  872. */
  873. #define wait_event_state(wq_head, condition, state) \
  874. ({ \
  875. int __ret = 0; \
  876. might_sleep(); \
  877. if (!(condition)) \
  878. __ret = __wait_event_state(wq_head, condition, state); \
  879. __ret; \
  880. })
  881. #define __wait_event_killable_timeout(wq_head, condition, timeout) \
  882. ___wait_event(wq_head, ___wait_cond_timeout(condition), \
  883. TASK_KILLABLE, 0, timeout, \
  884. __ret = schedule_timeout(__ret))
  885. /**
  886. * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
  887. * @wq_head: the waitqueue to wait on
  888. * @condition: a C expression for the event to wait for
  889. * @timeout: timeout, in jiffies
  890. *
  891. * The process is put to sleep (TASK_KILLABLE) until the
  892. * @condition evaluates to true or a kill signal is received.
  893. * The @condition is checked each time the waitqueue @wq_head is woken up.
  894. *
  895. * wake_up() has to be called after changing any variable that could
  896. * change the result of the wait condition.
  897. *
  898. * Returns:
  899. * 0 if the @condition evaluated to %false after the @timeout elapsed,
  900. * 1 if the @condition evaluated to %true after the @timeout elapsed,
  901. * the remaining jiffies (at least 1) if the @condition evaluated
  902. * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
  903. * interrupted by a kill signal.
  904. *
  905. * Only kill signals interrupt this process.
  906. */
  907. #define wait_event_killable_timeout(wq_head, condition, timeout) \
  908. ({ \
  909. long __ret = timeout; \
  910. might_sleep(); \
  911. if (!___wait_cond_timeout(condition)) \
  912. __ret = __wait_event_killable_timeout(wq_head, \
  913. condition, timeout); \
  914. __ret; \
  915. })
  916. #define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
  917. (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
  918. spin_unlock_irq(&lock); \
  919. cmd; \
  920. schedule(); \
  921. spin_lock_irq(&lock))
  922. /**
  923. * wait_event_lock_irq_cmd - sleep until a condition gets true. The
  924. * condition is checked under the lock. This
  925. * is expected to be called with the lock
  926. * taken.
  927. * @wq_head: the waitqueue to wait on
  928. * @condition: a C expression for the event to wait for
  929. * @lock: a locked spinlock_t, which will be released before cmd
  930. * and schedule() and reacquired afterwards.
  931. * @cmd: a command which is invoked outside the critical section before
  932. * sleep
  933. *
  934. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  935. * @condition evaluates to true. The @condition is checked each time
  936. * the waitqueue @wq_head is woken up.
  937. *
  938. * wake_up() has to be called after changing any variable that could
  939. * change the result of the wait condition.
  940. *
  941. * This is supposed to be called while holding the lock. The lock is
  942. * dropped before invoking the cmd and going to sleep and is reacquired
  943. * afterwards.
  944. */
  945. #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \
  946. do { \
  947. if (condition) \
  948. break; \
  949. __wait_event_lock_irq(wq_head, condition, lock, cmd); \
  950. } while (0)
  951. /**
  952. * wait_event_lock_irq - sleep until a condition gets true. The
  953. * condition is checked under the lock. This
  954. * is expected to be called with the lock
  955. * taken.
  956. * @wq_head: the waitqueue to wait on
  957. * @condition: a C expression for the event to wait for
  958. * @lock: a locked spinlock_t, which will be released before schedule()
  959. * and reacquired afterwards.
  960. *
  961. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  962. * @condition evaluates to true. The @condition is checked each time
  963. * the waitqueue @wq_head is woken up.
  964. *
  965. * wake_up() has to be called after changing any variable that could
  966. * change the result of the wait condition.
  967. *
  968. * This is supposed to be called while holding the lock. The lock is
  969. * dropped before going to sleep and is reacquired afterwards.
  970. */
  971. #define wait_event_lock_irq(wq_head, condition, lock) \
  972. do { \
  973. if (condition) \
  974. break; \
  975. __wait_event_lock_irq(wq_head, condition, lock, ); \
  976. } while (0)
  977. #define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
  978. ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
  979. spin_unlock_irq(&lock); \
  980. cmd; \
  981. schedule(); \
  982. spin_lock_irq(&lock))
  983. /**
  984. * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
  985. * The condition is checked under the lock. This is expected to
  986. * be called with the lock taken.
  987. * @wq_head: the waitqueue to wait on
  988. * @condition: a C expression for the event to wait for
  989. * @lock: a locked spinlock_t, which will be released before cmd and
  990. * schedule() and reacquired afterwards.
  991. * @cmd: a command which is invoked outside the critical section before
  992. * sleep
  993. *
  994. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  995. * @condition evaluates to true or a signal is received. The @condition is
  996. * checked each time the waitqueue @wq_head is woken up.
  997. *
  998. * wake_up() has to be called after changing any variable that could
  999. * change the result of the wait condition.
  1000. *
  1001. * This is supposed to be called while holding the lock. The lock is
  1002. * dropped before invoking the cmd and going to sleep and is reacquired
  1003. * afterwards.
  1004. *
  1005. * The macro will return -ERESTARTSYS if it was interrupted by a signal
  1006. * and 0 if @condition evaluated to true.
  1007. */
  1008. #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
  1009. ({ \
  1010. int __ret = 0; \
  1011. if (!(condition)) \
  1012. __ret = __wait_event_interruptible_lock_irq(wq_head, \
  1013. condition, lock, cmd); \
  1014. __ret; \
  1015. })
  1016. /**
  1017. * wait_event_interruptible_lock_irq - sleep until a condition gets true.
  1018. * The condition is checked under the lock. This is expected
  1019. * to be called with the lock taken.
  1020. * @wq_head: the waitqueue to wait on
  1021. * @condition: a C expression for the event to wait for
  1022. * @lock: a locked spinlock_t, which will be released before schedule()
  1023. * and reacquired afterwards.
  1024. *
  1025. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  1026. * @condition evaluates to true or signal is received. The @condition is
  1027. * checked each time the waitqueue @wq_head is woken up.
  1028. *
  1029. * wake_up() has to be called after changing any variable that could
  1030. * change the result of the wait condition.
  1031. *
  1032. * This is supposed to be called while holding the lock. The lock is
  1033. * dropped before going to sleep and is reacquired afterwards.
  1034. *
  1035. * The macro will return -ERESTARTSYS if it was interrupted by a signal
  1036. * and 0 if @condition evaluated to true.
  1037. */
  1038. #define wait_event_interruptible_lock_irq(wq_head, condition, lock) \
  1039. ({ \
  1040. int __ret = 0; \
  1041. if (!(condition)) \
  1042. __ret = __wait_event_interruptible_lock_irq(wq_head, \
  1043. condition, lock,); \
  1044. __ret; \
  1045. })
  1046. #define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
  1047. ___wait_event(wq_head, ___wait_cond_timeout(condition), \
  1048. state, 0, timeout, \
  1049. spin_unlock_irq(&lock); \
  1050. __ret = schedule_timeout(__ret); \
  1051. spin_lock_irq(&lock));
  1052. /**
  1053. * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
  1054. * true or a timeout elapses. The condition is checked under
  1055. * the lock. This is expected to be called with the lock taken.
  1056. * @wq_head: the waitqueue to wait on
  1057. * @condition: a C expression for the event to wait for
  1058. * @lock: a locked spinlock_t, which will be released before schedule()
  1059. * and reacquired afterwards.
  1060. * @timeout: timeout, in jiffies
  1061. *
  1062. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  1063. * @condition evaluates to true or signal is received. The @condition is
  1064. * checked each time the waitqueue @wq_head is woken up.
  1065. *
  1066. * wake_up() has to be called after changing any variable that could
  1067. * change the result of the wait condition.
  1068. *
  1069. * This is supposed to be called while holding the lock. The lock is
  1070. * dropped before going to sleep and is reacquired afterwards.
  1071. *
  1072. * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
  1073. * was interrupted by a signal, and the remaining jiffies otherwise
  1074. * if the condition evaluated to true before the timeout elapsed.
  1075. */
  1076. #define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
  1077. timeout) \
  1078. ({ \
  1079. long __ret = timeout; \
  1080. if (!___wait_cond_timeout(condition)) \
  1081. __ret = __wait_event_lock_irq_timeout( \
  1082. wq_head, condition, lock, timeout, \
  1083. TASK_INTERRUPTIBLE); \
  1084. __ret; \
  1085. })
  1086. #define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \
  1087. ({ \
  1088. long __ret = timeout; \
  1089. if (!___wait_cond_timeout(condition)) \
  1090. __ret = __wait_event_lock_irq_timeout( \
  1091. wq_head, condition, lock, timeout, \
  1092. TASK_UNINTERRUPTIBLE); \
  1093. __ret; \
  1094. })
  1095. /*
  1096. * Waitqueues which are removed from the waitqueue_head at wakeup time
  1097. */
  1098. void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
  1099. bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
  1100. long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
  1101. void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
  1102. long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
  1103. int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
  1104. int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
  1105. #define DEFINE_WAIT_FUNC(name, function) \
  1106. struct wait_queue_entry name = { \
  1107. .private = current, \
  1108. .func = function, \
  1109. .entry = LIST_HEAD_INIT((name).entry), \
  1110. }
  1111. #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
  1112. #define init_wait(wait) \
  1113. do { \
  1114. (wait)->private = current; \
  1115. (wait)->func = autoremove_wake_function; \
  1116. INIT_LIST_HEAD(&(wait)->entry); \
  1117. (wait)->flags = 0; \
  1118. } while (0)
  1119. typedef int (*task_call_f)(struct task_struct *p, void *arg);
  1120. extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);
  1121. #endif /* _LINUX_WAIT_H */