wait.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <trace/hooks/sched.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
        spin_lock_init(&wq_head->lock);
        lockdep_set_class_and_name(&wq_head->lock, key, name);
        INIT_LIST_HEAD(&wq_head->head);
}
EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue_entry_tail(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(add_wait_queue_priority);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        spin_lock_irqsave(&wq_head->lock, flags);
        __remove_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

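/*
 * Illustrative sketch (not part of the original file): the three add_*
 * variants above differ only in queueing discipline, which in turn decides
 * wake-up order in __wake_up_common() below. 'wq' is a placeholder head.
 *
 *        DECLARE_WAITQUEUE(a, current);
 *        DECLARE_WAITQUEUE(b, current);
 *
 *        add_wait_queue(&wq, &a);                // non-exclusive: queued toward the head
 *        add_wait_queue_exclusive(&wq, &b);      // exclusive: queued at the tail
 *
 *        // wake_up(&wq) now wakes 'a' (and every other non-exclusive
 *        // waiter) plus at most one exclusive waiter such as 'b'.
 */
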
/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake that number of exclusive tasks, and potentially all
 * the non-exclusive tasks. Normally, exclusive tasks will be at the end of
 * the list and any non-exclusive tasks will be woken first. A priority task
 * may be at the head of the list, and can consume the event without any other
 * tasks being woken.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key,
                        wait_queue_entry_t *bookmark)
{
        wait_queue_entry_t *curr, *next;
        int cnt = 0;

        lockdep_assert_held(&wq_head->lock);

        if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
                curr = list_next_entry(bookmark, entry);

                list_del(&bookmark->entry);
                bookmark->flags = 0;
        } else
                curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

        if (&curr->entry == &wq_head->head)
                return nr_exclusive;

        list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
                unsigned flags = curr->flags;
                int ret;

                if (flags & WQ_FLAG_BOOKMARK)
                        continue;

                ret = curr->func(curr, mode, wake_flags, key);
                if (ret < 0)
                        break;
                if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;

                if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
                                (&next->entry != &wq_head->head)) {
                        bookmark->flags = WQ_FLAG_BOOKMARK;
                        list_add_tail(&bookmark->entry, &next->entry);
                        break;
                }
        }

        return nr_exclusive;
}

static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key)
{
        unsigned long flags;
        wait_queue_entry_t bookmark;
        int remaining = nr_exclusive;

        bookmark.flags = 0;
        bookmark.private = NULL;
        bookmark.func = NULL;
        INIT_LIST_HEAD(&bookmark.entry);

        do {
                spin_lock_irqsave(&wq_head->lock, flags);
                remaining = __wake_up_common(wq_head, mode, remaining,
                                                wake_flags, key, &bookmark);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        } while (bookmark.flags & WQ_FLAG_BOOKMARK);

        return nr_exclusive - remaining;
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier
 * before accessing the task state. Returns the number of exclusive
 * tasks that were awakened.
 */
int __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
              int nr_exclusive, void *key)
{
        return __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);

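/*
 * Usage sketch (not part of the original file): the common wake_up*()
 * wrappers in <linux/wait.h> reduce to __wake_up(), e.g.:
 *
 *        wake_up(&wq);                   // __wake_up(&wq, TASK_NORMAL, 1, NULL)
 *        wake_up_all(&wq);               // __wake_up(&wq, TASK_NORMAL, 0, NULL)
 *        wake_up_interruptible(&wq);     // __wake_up(&wq, TASK_INTERRUPTIBLE, 1, NULL)
 */
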
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
        __wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
        __wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
                unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
        __wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
                        void *key)
{
        int wake_flags = WF_SYNC;

        if (unlikely(!wq_head))
                return;

        trace_android_vh_set_wake_flags(&wake_flags, &mode);
        __wake_up_common_lock(wq_head, mode, 1, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

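/*
 * Usage sketch (not part of the original file): poll-style sync wakeups in
 * <linux/wait.h> funnel into __wake_up_sync_key(), e.g.:
 *
 *        wake_up_interruptible_sync_poll(&wq, EPOLLIN);
 *        // __wake_up_sync_key(&wq, TASK_INTERRUPTIBLE, poll_to_key(EPOLLIN))
 */
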
/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
                               unsigned int mode, void *key)
{
        __wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
        __wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */

void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
        __wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
        /* POLLFREE must have cleared the queue. */
        WARN_ON_ONCE(waitqueue_active(wq_head));
}

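/*
 * Note (not part of the original file): callers normally go through the
 * wake_up_pollfree() wrapper in <linux/wait.h>, which first checks
 * waitqueue_active() so the common no-waiter case skips this function
 * entirely.
 */
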
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry))
                __add_wait_queue(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

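/*
 * Typical open-coded wait loop built on the primitives above (illustrative
 * sketch, not part of the original file; 'wq' and 'condition' are
 * placeholders):
 *
 *        DEFINE_WAIT(wait);
 *
 *        for (;;) {
 *                prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
 *                if (condition)
 *                        break;
 *                if (signal_pending(current))
 *                        break;
 *                schedule();
 *        }
 *        finish_wait(&wq, &wait);
 */
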
/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;
        bool was_empty = false;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry)) {
                was_empty = list_empty(&wq_head->head);
                __add_wait_queue_entry_tail(wq_head, wq_entry);
        }
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
        return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
        wq_entry->flags = flags;
        wq_entry->private = current;
        wq_entry->func = autoremove_wake_function;
        INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;
        long ret = 0;

        spin_lock_irqsave(&wq_head->lock, flags);
        if (signal_pending_state(state, current)) {
                /*
                 * Exclusive waiter must not fail if it was selected by wakeup,
                 * it should "consume" the condition we were waiting for.
                 *
                 * The caller will recheck the condition and return success if
                 * we were already woken up, we can not miss the event because
                 * wakeup locks/unlocks the same wq_head->lock.
                 *
                 * But we need to ensure that set-condition + wakeup after that
                 * can't see us, it should wake up another exclusive waiter if
                 * we fail.
                 */
                list_del_init(&wq_entry->entry);
                ret = -ERESTARTSYS;
        } else {
                if (list_empty(&wq_entry->entry)) {
                        if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
                                __add_wait_queue_entry_tail(wq_head, wq_entry);
                        else
                                __add_wait_queue(wq_head, wq_entry);
                }
                set_current_state(state);
        }
        spin_unlock_irqrestore(&wq_head->lock, flags);

        return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);

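/*
 * Illustrative sketch (not part of the original file): the wait_event*()
 * macros in <linux/wait.h> are built on the helpers above, roughly:
 *
 *        init_wait_entry(&wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *        for (;;) {
 *                long ret = prepare_to_wait_event(&wq, &wq_entry, state);
 *                if (condition)
 *                        break;
 *                if (ret)        // -ERESTARTSYS: interruptible wait, signal pending
 *                        break;
 *                schedule();
 *        }
 *        finish_wait(&wq, &wq_entry);
 */
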
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock(&wq->lock);
        schedule();
        spin_lock(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock_irq(&wq->lock);
        schedule();
        spin_lock_irq(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);

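/*
 * Illustrative caller sketch (not part of the original file), roughly what
 * wait_event_interruptible_locked() in <linux/wait.h> expands to; 'wq' and
 * 'condition' are placeholders, and the caller has already tested the
 * condition once under the lock as the comment above describes:
 *
 *        DEFINE_WAIT(wait);
 *        int err;
 *
 *        spin_lock(&wq.lock);
 *        do {
 *                err = do_wait_intr(&wq, &wait); // may drop and retake wq.lock
 *        } while (!condition && !err);
 *        __remove_wait_queue(&wq, &wait);
 *        __set_current_state(TASK_RUNNING);
 *        spin_unlock(&wq.lock);
 */
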
/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         * - we use the "careful" check that verifies both
         *   the next and prev pointers, so that there cannot
         *   be any half-pending updates in progress on other
         *   CPU's that we haven't seen yet (and that might
         *   still change the stack area),
         * and
         * - all other users take the lock (ie we can only
         *   have _one_ other CPU that looks at or modifies
         *   the list).
         */
        if (!list_empty_careful(&wq_entry->entry)) {
                spin_lock_irqsave(&wq_head->lock, flags);
                list_del_init(&wq_entry->entry);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wq_entry, mode, sync, key);

        if (ret)
                list_del_init_careful(&wq_entry->entry);

        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

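/*
 * Note (not part of the original file): DEFINE_WAIT() in <linux/wait.h>
 * installs autoremove_wake_function as the wake callback, so a waiter woken
 * through it is already off the queue by the time finish_wait() runs and
 * finish_wait()'s lockless list_empty_careful() check usually suffices.
 */
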
static inline bool is_kthread_should_stop(void)
{
        return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()                          // in woken_wake_function()
 *
 *     p->state = mode;                            wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A                              try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))        <full barrier>
 *         schedule()                                 if (p->state & mode)
 *     p->state = TASK_RUNNING;                          p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;          ~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B                              condition = true;
 * }                                               smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);             wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
        /*
         * The below executes an smp_mb(), which matches with the full barrier
         * executed by the try_to_wake_up() in woken_wake_function() such that
         * either we see the store to wq_entry->flags in woken_wake_function()
         * or woken_wake_function() sees our store to current->state.
         */
        set_current_state(mode); /* A */
        if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
                timeout = schedule_timeout(timeout);
        __set_current_state(TASK_RUNNING);

        /*
         * The below executes an smp_mb(), which matches with the smp_mb() (C)
         * in woken_wake_function() such that either we see the wait condition
         * being true or the store to wq_entry->flags in woken_wake_function()
         * follows ours in the coherence order.
         */
        smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

        return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        /* Pairs with the smp_store_mb() in wait_woken(). */
        smp_mb(); /* C */
        wq_entry->flags |= WQ_FLAG_WOKEN;

        return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);

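/*
 * Concrete usage sketch for the wait_woken()/woken_wake_function() pair
 * (illustrative, not part of the original file; 'wq', 'condition' and the
 * timeout are placeholders):
 *
 *        DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *        long timeout = msecs_to_jiffies(100);
 *
 *        add_wait_queue(&wq, &wait);
 *        while (!condition && timeout)
 *                timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *        remove_wait_queue(&wq, &wait);
 */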